/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

/*
 * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family.
 * Make sure they are always aligned.
 */
static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

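/*
 * Allocate a reply skb and start a generic netlink message of type @cmd
 * in it.  Replies to requests are addressed via @info; autonomous exit
 * messages pass info == NULL and are stamped with a per-cpu sequence
 * number instead.
 */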
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Send taskstats data in @skb back to the listener that sent the request in @info
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);

	genlmsg_end(skb, reply);

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to the @listeners registered for a cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	genlmsg_end(skb, reply);

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

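/*
 * Fill @stats with the accounting data for the single task @tsk,
 * reporting ids relative to @user_ns and @pid_ns.
 */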
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}

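/* Look up the task for @pid in the caller's pid namespace and fill @stats for it */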
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}

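/*
 * Fill @stats with aggregated statistics for the thread group led by @tgid:
 * start from the stats already accumulated for dead threads (if any) and
 * add the per-task numbers of all live, non-exiting threads.
 */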
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

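/*
 * Fold the exiting task's delay accounting into the thread group's
 * accumulated taskstats (tsk->signal->stats), if one has been allocated.
 */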
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

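/*
 * Register or deregister @pid as an exit-data listener on every CPU in @mask.
 * Only tasks in the initial pid and user namespaces are allowed to listen.
 */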
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;
	int ret = 0;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (current_user_ns() != &init_user_ns)
		return -EINVAL;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!s) {
				ret = -ENOMEM;
				goto cleanup;
			}
			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return ret;
}

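/*
 * Parse the cpulist string in attribute @na into @mask.
 * Returns 1 if the attribute is absent, 0 on success or a negative errno.
 */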
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

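/*
 * Start a nested PID/TGID aggregate in @skb and reserve room for the
 * taskstats payload; returns a pointer to the reserved struct taskstats,
 * or NULL on failure.
 */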
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS,
				sizeof(struct taskstats), TASKSTATS_TYPE_NULL);
	if (!ret) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

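/*
 * CGROUPSTATS_CMD_GET handler: build cgroupstats for the cgroup directory
 * referenced by the CGROUPSTATS_CMD_ATTR_FD file descriptor and send them
 * back to the requester.
 */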
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_path.dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fdput(f);
	return rc;
}

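/* Register the requesting task as a listener for the cpumask in the request */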
static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

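/* Remove the requesting task from the listener lists of the cpumask in the request */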
static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

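/*
 * Size of a taskstats reply: a pid/tgid attribute, the 64-bit aligned
 * taskstats attribute and the enclosing nested aggregate attribute.
 */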
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size_64bit(sizeof(struct taskstats)) +
		nla_total_size(0);

	return size;
}

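/* TASKSTATS_CMD_ATTR_PID: reply with the statistics of a single task */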
static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

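/* TASKSTATS_CMD_ATTR_TGID: reply with aggregated statistics for a thread group */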
static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

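/* Dispatch a TASKSTATS_CMD_GET request according to the attribute it carries */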
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

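/*
 * Lazily allocate the per-thread-group taskstats structure the first time a
 * member of a multithreaded group exits.  Returns NULL for single-threaded
 * tasks and when the allocation fails.
 */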
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats_new, *stats;

	/* Pairs with smp_store_release() below. */
	stats = smp_load_acquire(&sig->stats);
	if (stats || thread_group_empty(tsk))
		return stats;

	/* No problem if kmem_cache_zalloc() fails */
	stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	stats = sig->stats;
	if (!stats) {
		/*
		 * Pairs with smp_store_release() above and order the
		 * kmem_cache_zalloc().
		 */
		smp_store_release(&sig->stats, stats_new);
		stats = stats_new;
		stats_new = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats_new)
		kmem_cache_free(taskstats_cache, stats_new);

	return stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = raw_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

static const struct genl_ops taskstats_ops[] = {
	{
		.cmd		= TASKSTATS_CMD_GET,
		.doit		= taskstats_user_cmd,
		.policy		= taskstats_cmd_get_policy,
		.flags		= GENL_ADMIN_PERM,
	},
	{
		.cmd		= CGROUPSTATS_CMD_GET,
		.doit		= cgroupstats_user_cmd,
		.policy		= cgroupstats_cmd_get_policy,
	},
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

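/* Register the taskstats generic netlink family and its operations */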
static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family_with_ops(&family, taskstats_ops);
	if (rc)
		return rc;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);