blob: 18c5b9b16645dfa49a218d4b12253240f97310b7 [file] [log] [blame]
/*
 * cn_proc.c - process events connector
 *
 * Copyright (C) Matt Helsley, IBM Corp. 2005
 * Based on cn_fork.c by Guillaume Thouvenin <guillaume.thouvenin@bull.net>
 * Original copyright notice follows:
 * Copyright (C) 2005 BULL SA.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
24
25#include <linux/module.h>
26#include <linux/kernel.h>
Matt Helsleycaf3c9d2006-01-09 20:52:40 -080027#include <linux/ktime.h>
Matt Helsley9f460802005-11-07 00:59:16 -080028#include <linux/init.h>
Matt Helsley1d31a4e2006-06-23 02:05:42 -070029#include <linux/connector.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090030#include <linux/gfp.h>
Vladimir Zapolskiyf701e5b2011-07-15 20:45:18 +030031#include <linux/ptrace.h>
Arun Sharma600634972011-07-26 16:09:06 -070032#include <linux/atomic.h>
Eric W. Biederman9582d902012-02-07 16:48:16 -080033#include <linux/pid_namespace.h>
Arun Sharma600634972011-07-26 16:09:06 -070034
Matt Helsley9f460802005-11-07 00:59:16 -080035#include <linux/cn_proc.h>
36
/*
 * Size of a cn_msg followed by a proc_event structure.  Since the
 * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
 * add one 4-byte word to the size here, and then start the actual
 * cn_msg structure 4 bytes into the stack buffer.  The result is that
 * the immediately following proc_event structure is aligned to 8 bytes.
 */
#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)

/* See comment above; we test our assumption about sizeof struct cn_msg here. */
static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
{
	/* If cn_msg ever changes size, the 4-byte alignment trick breaks. */
	BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
	/* Skip the 4 padding bytes so the trailing proc_event is 8-aligned. */
	return (struct cn_msg *)(buffer + 4);
}
Matt Helsley9f460802005-11-07 00:59:16 -080052
/* Count of userspace listeners; events are only built when it is >= 1. */
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
/* Connector identity (idx/val pair) under which proc events are multicast. */
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
58
/*
 * get_seq - fetch the next per-cpu sequence number and the owning CPU.
 * @ts:  out: sequence number to use for this event (counter value before
 *       the increment)
 * @cpu: out: CPU id the sequence number belongs to
 *
 * Preemption is disabled so the counter increment and the CPU id read
 * are guaranteed to happen on the same CPU.
 */
static inline void get_seq(__u32 *ts, int *cpu)
{
	preempt_disable();
	*ts = __this_cpu_inc_return(proc_event_counts) - 1;
	*cpu = smp_processor_id();
	preempt_enable();
}
66
/*
 * proc_fork_connector - report a fork to userspace listeners.
 * @task: the newly created child task
 *
 * Builds a PROC_EVENT_FORK message carrying the parent's and child's
 * pid/tgid and multicasts it on the proc connector.  Does nothing when
 * there are no listeners; a netlink send failure silently drops the event.
 */
void proc_fork_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	struct timespec ts;
	struct task_struct *parent;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	/* Zero the union so no uninitialized stack bytes leak to userspace. */
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	ev->timestamp_ns = timespec_to_ns(&ts);
	ev->what = PROC_EVENT_FORK;
	/* real_parent is RCU-protected; sample pid/tgid under the read lock. */
	rcu_read_lock();
	parent = rcu_dereference(task->real_parent);
	ev->event_data.fork.parent_pid = parent->pid;
	ev->event_data.fork.parent_tgid = parent->tgid;
	rcu_read_unlock();
	ev->event_data.fork.child_pid = task->pid;
	ev->event_data.fork.child_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	/* If cn_netlink_send() failed, the data is not sent */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
100
101void proc_exec_connector(struct task_struct *task)
102{
103 struct cn_msg *msg;
104 struct proc_event *ev;
Chandra Seetharaman822cfbff2006-07-30 03:03:04 -0700105 struct timespec ts;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500106 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Matt Helsley9f460802005-11-07 00:59:16 -0800107
108 if (atomic_read(&proc_event_num_listeners) < 1)
109 return;
110
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500111 msg = buffer_to_cn_msg(buffer);
Valentin Ilief3c48ec2012-07-14 13:08:29 +0000112 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +0200113 memset(&ev->event_data, 0, sizeof(ev->event_data));
Matt Helsley9f460802005-11-07 00:59:16 -0800114 get_seq(&msg->seq, &ev->cpu);
Chandra Seetharaman822cfbff2006-07-30 03:03:04 -0700115 ktime_get_ts(&ts); /* get high res monotonic timestamp */
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500116 ev->timestamp_ns = timespec_to_ns(&ts);
Matt Helsley9f460802005-11-07 00:59:16 -0800117 ev->what = PROC_EVENT_EXEC;
118 ev->event_data.exec.process_pid = task->pid;
119 ev->event_data.exec.process_tgid = task->tgid;
120
121 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
122 msg->ack = 0; /* not used */
123 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +0200124 msg->flags = 0; /* not used */
Matt Helsley9f460802005-11-07 00:59:16 -0800125 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
126}
127
/*
 * proc_id_connector - report a change of a task's uid or gid.
 * @task:     the task whose credentials changed
 * @which_id: PROC_EVENT_UID or PROC_EVENT_GID; any other value drops
 *            the event
 *
 * Real and effective ids are read from the task's creds under RCU and
 * translated into the initial user namespace before being sent.
 */
void proc_id_connector(struct task_struct *task, int which_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	struct timespec ts;
	const struct cred *cred;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	/* Zero the union so no uninitialized stack bytes leak to userspace. */
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->what = which_id;
	ev->event_data.id.process_pid = task->pid;
	ev->event_data.id.process_tgid = task->tgid;
	rcu_read_lock();
	cred = __task_cred(task);
	if (which_id == PROC_EVENT_UID) {
		ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid);
		ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
	} else if (which_id == PROC_EVENT_GID) {
		ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid);
		ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
	} else {
		/* Unknown id type: must drop the RCU lock before bailing out. */
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	ev->timestamp_ns = timespec_to_ns(&ts);

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
168
Scott James Remnant02b51df2009-09-22 16:43:44 -0700169void proc_sid_connector(struct task_struct *task)
170{
171 struct cn_msg *msg;
172 struct proc_event *ev;
173 struct timespec ts;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500174 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Scott James Remnant02b51df2009-09-22 16:43:44 -0700175
176 if (atomic_read(&proc_event_num_listeners) < 1)
177 return;
178
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500179 msg = buffer_to_cn_msg(buffer);
Scott James Remnant02b51df2009-09-22 16:43:44 -0700180 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +0200181 memset(&ev->event_data, 0, sizeof(ev->event_data));
Scott James Remnant02b51df2009-09-22 16:43:44 -0700182 get_seq(&msg->seq, &ev->cpu);
183 ktime_get_ts(&ts); /* get high res monotonic timestamp */
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500184 ev->timestamp_ns = timespec_to_ns(&ts);
Scott James Remnant02b51df2009-09-22 16:43:44 -0700185 ev->what = PROC_EVENT_SID;
186 ev->event_data.sid.process_pid = task->pid;
187 ev->event_data.sid.process_tgid = task->tgid;
188
189 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
190 msg->ack = 0; /* not used */
191 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +0200192 msg->flags = 0; /* not used */
Scott James Remnant02b51df2009-09-22 16:43:44 -0700193 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
194}
195
Vladimir Zapolskiyf701e5b2011-07-15 20:45:18 +0300196void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
197{
198 struct cn_msg *msg;
199 struct proc_event *ev;
200 struct timespec ts;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500201 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Vladimir Zapolskiyf701e5b2011-07-15 20:45:18 +0300202
203 if (atomic_read(&proc_event_num_listeners) < 1)
204 return;
205
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500206 msg = buffer_to_cn_msg(buffer);
Vladimir Zapolskiyf701e5b2011-07-15 20:45:18 +0300207 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +0200208 memset(&ev->event_data, 0, sizeof(ev->event_data));
Vladimir Zapolskiyf701e5b2011-07-15 20:45:18 +0300209 get_seq(&msg->seq, &ev->cpu);
210 ktime_get_ts(&ts); /* get high res monotonic timestamp */
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500211 ev->timestamp_ns = timespec_to_ns(&ts);
Vladimir Zapolskiyf701e5b2011-07-15 20:45:18 +0300212 ev->what = PROC_EVENT_PTRACE;
213 ev->event_data.ptrace.process_pid = task->pid;
214 ev->event_data.ptrace.process_tgid = task->tgid;
215 if (ptrace_id == PTRACE_ATTACH) {
216 ev->event_data.ptrace.tracer_pid = current->pid;
217 ev->event_data.ptrace.tracer_tgid = current->tgid;
218 } else if (ptrace_id == PTRACE_DETACH) {
219 ev->event_data.ptrace.tracer_pid = 0;
220 ev->event_data.ptrace.tracer_tgid = 0;
221 } else
222 return;
223
224 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
225 msg->ack = 0; /* not used */
226 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +0200227 msg->flags = 0; /* not used */
Vladimir Zapolskiyf701e5b2011-07-15 20:45:18 +0300228 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
229}
230
Vladimir Zapolskiyf786ecb2011-09-21 09:26:44 +0000231void proc_comm_connector(struct task_struct *task)
232{
233 struct cn_msg *msg;
234 struct proc_event *ev;
235 struct timespec ts;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500236 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Vladimir Zapolskiyf786ecb2011-09-21 09:26:44 +0000237
238 if (atomic_read(&proc_event_num_listeners) < 1)
239 return;
240
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500241 msg = buffer_to_cn_msg(buffer);
Vladimir Zapolskiyf786ecb2011-09-21 09:26:44 +0000242 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +0200243 memset(&ev->event_data, 0, sizeof(ev->event_data));
Vladimir Zapolskiyf786ecb2011-09-21 09:26:44 +0000244 get_seq(&msg->seq, &ev->cpu);
245 ktime_get_ts(&ts); /* get high res monotonic timestamp */
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500246 ev->timestamp_ns = timespec_to_ns(&ts);
Vladimir Zapolskiyf786ecb2011-09-21 09:26:44 +0000247 ev->what = PROC_EVENT_COMM;
248 ev->event_data.comm.process_pid = task->pid;
249 ev->event_data.comm.process_tgid = task->tgid;
250 get_task_comm(ev->event_data.comm.comm, task);
251
252 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
253 msg->ack = 0; /* not used */
254 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +0200255 msg->flags = 0; /* not used */
Vladimir Zapolskiyf786ecb2011-09-21 09:26:44 +0000256 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
257}
258
Jesper Derehag2b5faa42013-03-19 20:50:05 +0000259void proc_coredump_connector(struct task_struct *task)
260{
261 struct cn_msg *msg;
262 struct proc_event *ev;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500263 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Jesper Derehag2b5faa42013-03-19 20:50:05 +0000264 struct timespec ts;
265
266 if (atomic_read(&proc_event_num_listeners) < 1)
267 return;
268
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500269 msg = buffer_to_cn_msg(buffer);
Jesper Derehag2b5faa42013-03-19 20:50:05 +0000270 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +0200271 memset(&ev->event_data, 0, sizeof(ev->event_data));
Jesper Derehag2b5faa42013-03-19 20:50:05 +0000272 get_seq(&msg->seq, &ev->cpu);
273 ktime_get_ts(&ts); /* get high res monotonic timestamp */
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500274 ev->timestamp_ns = timespec_to_ns(&ts);
Jesper Derehag2b5faa42013-03-19 20:50:05 +0000275 ev->what = PROC_EVENT_COREDUMP;
276 ev->event_data.coredump.process_pid = task->pid;
277 ev->event_data.coredump.process_tgid = task->tgid;
278
279 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
280 msg->ack = 0; /* not used */
281 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +0200282 msg->flags = 0; /* not used */
Jesper Derehag2b5faa42013-03-19 20:50:05 +0000283 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
284}
285
/*
 * proc_exit_connector - report a task exit to userspace listeners.
 * @task: the exiting task
 *
 * Emits a PROC_EVENT_EXIT message carrying the task's pid/tgid plus its
 * exit_code and exit_signal.  A netlink send failure drops the event.
 */
void proc_exit_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	struct timespec ts;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	/* Zero the union so no uninitialized stack bytes leak to userspace. */
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	ev->timestamp_ns = timespec_to_ns(&ts);
	ev->what = PROC_EVENT_EXIT;
	ev->event_data.exit.process_pid = task->pid;
	ev->event_data.exit.process_tgid = task->tgid;
	ev->event_data.exit.exit_code = task->exit_code;
	ev->event_data.exit.exit_signal = task->exit_signal;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
314
/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: this is the negative of conventional kernel error
 * values because it's not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	struct timespec ts;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	/* Zero the union so no uninitialized stack bytes leak to userspace. */
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	/* Echo the request's sequence number back to the sender. */
	msg->seq = rcvd_seq;
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	ev->timestamp_ns = timespec_to_ns(&ts);
	ev->cpu = -1; /* an ack is not tied to any particular CPU */
	ev->what = PROC_EVENT_NONE;
	ev->event_data.ack.err = err;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = rcvd_ack + 1;
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
348
/**
 * cn_proc_mcast_ctl
 * @msg: message sent from userspace via the connector
 * @nsp: netlink sender parameters (unused here)
 *
 * Handles PROC_CN_MCAST_LISTEN/IGNORE requests by adjusting the
 * listener count.  Requests from non-initial namespaces are ignored;
 * valid requests are always acked with 0 on success or a positive
 * errno-style code on failure (see cn_proc_ack()).
 */
static void cn_proc_mcast_ctl(struct cn_msg *msg,
			      struct netlink_skb_parms *nsp)
{
	enum proc_cn_mcast_op *mc_op = NULL;
	int err = 0;

	if (msg->len != sizeof(*mc_op))
		return;

	/*
	 * Events are reported with respect to the initial pid
	 * and user namespaces so ignore requestors from
	 * other namespaces.
	 */
	if ((current_user_ns() != &init_user_ns) ||
	    (task_active_pid_ns(current) != &init_pid_ns))
		return;

	/* Can only change if privileged. */
	if (!capable(CAP_NET_ADMIN)) {
		err = EPERM;
		goto out;
	}

	mc_op = (enum proc_cn_mcast_op *)msg->data;
	switch (*mc_op) {
	case PROC_CN_MCAST_LISTEN:
		atomic_inc(&proc_event_num_listeners);
		break;
	case PROC_CN_MCAST_IGNORE:
		atomic_dec(&proc_event_num_listeners);
		break;
	default:
		err = EINVAL;
		break;
	}

out:
	cn_proc_ack(err, msg->seq, msg->ack);
}
393
394/*
395 * cn_proc_init - initialization entry point
396 *
397 * Adds the connector callback to the connector driver.
398 */
399static int __init cn_proc_init(void)
400{
Valentin Ilief3c48ec2012-07-14 13:08:29 +0000401 int err = cn_add_callback(&cn_proc_event_id,
402 "cn_proc",
403 &cn_proc_mcast_ctl);
404 if (err) {
405 pr_warn("cn_proc failed to register\n");
Matt Helsley9f460802005-11-07 00:59:16 -0800406 return err;
407 }
408 return 0;
409}
410
411module_init(cn_proc_init);