/*
 * NETLINK	Kernel-user communication protocol.
 *
 *		Authors:	Alan Cox <alan@redhat.com>
 *				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *				 added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *				 - inc module use count of module that owns
 *				   the kernel socket in case userspace opens
 *				   socket of same protocol
 *				 - remove all module support, since netlink is
 *				   mandatory if CONFIG_NET=y these days
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>

#include <net/sock.h>
#include <net/scm.h>

#define Nprintk(a...)

struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock		sk;
	u32			pid;
	unsigned int		groups;
	u32			dst_pid;
	u32			dst_group;
	unsigned long		state;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	spinlock_t		cb_lock;
	void			(*data_ready)(struct sock *sk, int bytes);
	struct module		*module;
	u32			flags;
};

#define NETLINK_KERNEL_SOCKET	0x1

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return (struct netlink_sock *)sk;
}

struct nl_pid_hash {
	struct hlist_head *table;
	unsigned long rehash_time;

	unsigned int mask;
	unsigned int shift;

	unsigned int entries;
	unsigned int max_shift;

	u32 rnd;
};

struct netlink_table {
	struct nl_pid_hash hash;
	struct hlist_head mc_list;
	unsigned int nl_nonroot;
	struct module *module;
};

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;

static u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}
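
/*
 * Example (illustrative): a socket listening to group 1 is matched by
 * mask 0x1, group 3 by 0x4, and so on; group 0 means "no group" and
 * yields an empty mask, so it never matches a multicast delivery.
 */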

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}

static void netlink_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->cb);
}
/* Sleeping on this lock without WQ_FLAG_EXCLUSIVE is fine on UP but _very_ bad
 * on SMP: when several writers sleep and a reader wakes them up, all but one
 * immediately hit the write lock and grab all the CPUs. An exclusive sleep
 * solves this, _but_ remember that it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
	write_lock_bh(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for(;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_bh(&nl_table_lock);
			schedule();
			write_lock_bh(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_bh(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us with netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
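
/*
 * Locking sketch (descriptive summary, not a new API): fast lookups such
 * as netlink_lookup() take nl_table_lock directly, while callers that may
 * sleep between lookups, e.g. netlink_broadcast(), bracket the walk with
 * netlink_lock_table()/netlink_unlock_table().  That keeps nl_table_users
 * elevated, which netlink_table_grab() waits out before a writer such as
 * netlink_insert() is allowed to modify the hash.
 */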

static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (nlk_sk(sk)->pid == pid) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}

static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}

static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_alloc(size);
	if (!table)
		return 0;

	memset(table, 0, size);
	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}

static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
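
/*
 * Rehash policy implemented above (summary): grow the table once the
 * average chain length exceeds one entry per bucket; otherwise, if the
 * chain just scanned is longer than average, re-randomize hash->rnd at
 * the current size, throttled to once per ten minutes via rehash_time.
 */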

static struct proto_ops netlink_ops;

static int netlink_insert(struct sock *sk, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid)
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}

static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->groups)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int netlink_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;
	struct module *module;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
	if (!nl_table[protocol].hash.entries) {
#ifdef CONFIG_KMOD
		/* We do 'best effort'.  If we find a matching module,
		 * it is loaded.  If not, we don't return an error to
		 * allow pure userspace<->userspace communication. -HW
		 */
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
#endif
	}
	module = nl_table[protocol].module;
	if (!try_module_get(module))
		module = NULL;
	netlink_unlock_table();

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
	if (!sk) {
		module_put(module);
		return -ENOMEM;
	}

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);

	nlk->module = module;
	spin_lock_init(&nlk->cb_lock);
	init_waitqueue_head(&nlk->wait);
	sk->sk_destruct = netlink_sock_destruct;

	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	nlk = nlk_sk(sk);

	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
		nlk->cb = NULL;
	}
	spin_unlock(&nlk->cb_lock);

	/* OK.  Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->groups) {
		struct netlink_notify n = {
			.protocol = sk->sk_protocol,
			.pid = nlk->pid,
		};
		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
	}

	if (nlk->module)
		module_put(nlk->module);

	if (nlk->flags & NETLINK_KERNEL_SOCKET) {
		netlink_table_grab();
		nl_table[sk->sk_protocol].module = NULL;
		netlink_table_ungrab();
	}

	sock_put(sk);
	return 0;
}

static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->pid;
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine.  */
	if (err == -EBUSY)
		err = 0;

	return err;
}
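
/*
 * Note on the pid space (descriptive): autobind first tries current->pid,
 * so a process normally owns the netlink pid equal to its process id; on
 * collision it hands out negative ids counting down from -4097, keeping
 * them clear of the positive pids other processes may bind explicitly.
 */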

static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
		return -EPERM;

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && !nlk->groups)
		return 0;

	netlink_table_grab();
	if (nlk->groups && !nladdr->nl_groups)
		__sk_del_bind_node(sk);
	else if (!nlk->groups && nladdr->nl_groups)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->groups = nladdr->nl_groups;
	netlink_table_ungrab();

	return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state = NETLINK_UNCONNECTED;
		nlk->dst_pid = 0;
		nlk->dst_group = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state = NETLINK_CONNECTED;
		nlk->dst_pid = nladdr->nl_pid;
		nlk->dst_group = ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups;
	}
	return 0;
}

static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	int protocol = ssk->sk_protocol;
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if ((nlk->pid == 0 && !nlk->data_ready) ||
	    (sock->sk_state == NETLINK_CONNECTED &&
	     nlk->dst_pid != nlk_sk(ssk)->pid)) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket.  On error, the
 * reference is dropped.  The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error.  skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!timeo) {
			if (!nlk->pid)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
	struct netlink_sock *nlk;
	int len = skb->len;

	nlk = nlk_sk(sk);

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   unsigned int __nocast allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, nonblock, timeo);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
}

static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	unsigned int allocation;
	struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || !(nlk->groups & netlink_group_mask(p->group)))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, int allocation)
{
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}
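
/*
 * Call sketch (caller and skb names hypothetical): a subsystem owning a
 * kernel socket nl_sk can notify every member of multicast group 1 with
 *
 *	netlink_broadcast(nl_sk, skb, 0, 1, GFP_KERNEL);
 *
 * where pid names the originating port to skip during delivery; -ESRCH
 * means no listeners were bound, -ENOBUFS that a clone failed.
 */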

struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (nlk->pid == p->pid || !(nlk->groups & netlink_group_mask(p->group)))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}

static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid = nlk->pid;
	NETLINK_CB(skb).dst_pid = dst_pid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so we have to save
	   the current capabilities and check them when this message is
	   delivered to the corresponding kernel module. --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
	return err;
}

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb->h.raw = skb->data;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid = NETLINK_CB(skb).pid;
		addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);

out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

static void netlink_data_ready(struct sock *sk, int len)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->data_ready)
		nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
}

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len), struct module *module)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;

	if (!nl_table)
		return NULL;

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (netlink_create(sock, unit) < 0)
		goto out_sock_release;

	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->data_ready = input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	nl_table[unit].module = module;
	netlink_table_ungrab();

	return sk;

out_sock_release:
	sock_release(sock);
	return NULL;
}
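
/*
 * Minimal usage sketch (names hypothetical, error handling elided): a
 * subsystem creates its kernel socket once at init time and drains the
 * receive queue from its input callback:
 *
 *	static void my_input(struct sock *sk, int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 *			... process the nlmsghdr at skb->data ...
 *			kfree_skb(skb);
 *		}
 *	}
 *
 *	nl_sk = netlink_kernel_create(NETLINK_USERSOCK, my_input, THIS_MODULE);
 */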

void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}

static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	spin_lock(&nlk->cb_lock);

	cb = nlk->cb;
	if (cb == NULL) {
		spin_unlock(&nlk->cb_lock);
		kfree_skb(skb);
		return -EINVAL;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		spin_unlock(&nlk->cb_lock);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = NLMSG_NEW_ANSWER(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	cb->done(cb);
	nlk->cb = NULL;
	spin_unlock(&nlk->cb_lock);

	netlink_destroy_callback(cb);
	return 0;

nlmsg_failure:
	return -ENOBUFS;
}

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	memset(cb, 0, sizeof(*cb));
	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		spin_unlock(&nlk->cb_lock);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	spin_unlock(&nlk->cb_lock);

	netlink_dump(sk);
	sock_put(sk);
	return 0;
}
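
/*
 * Callback contract implied above (summary): cb->dump() is called once
 * per NLMSG_GOODSIZE skb and returns a positive length while it still
 * has data; a return <= 0 makes netlink_dump() append NLMSG_DONE, run
 * cb->done() and free the callback.  A request handler typically just
 * does
 *
 *	return netlink_dump_start(sk, skb, nlh, my_dump, my_done);
 *
 * with my_dump/my_done being hypothetical names for its callbacks.
 */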

void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	int size;

	if (err == 0)
		size = NLMSG_SPACE(sizeof(struct nlmsgerr));
	else
		size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
	errmsg = NLMSG_DATA(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
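
/*
 * Note on the reply built above: a successful ack (err == 0) carries only
 * the original nlmsghdr inside the nlmsgerr, while a real error echoes
 * the whole offending message back to the sender, which is why the error
 * path reserves the larger NLMSG_ALIGN(nlh->nlmsg_len) payload.
 */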


#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (s)
		return s;

	iter = seq->private;
	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}


static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);

	}
	return 0;
}

static struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};


static int netlink_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nl_seq_iter *iter;
	int err;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &netlink_seq_ops);
	if (err) {
		kfree(iter);
		return err;
	}

	memset(iter, 0, sizeof(*iter));
	seq = file->private_data;
	seq->private = iter;
	return 0;
}

static struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netlink_chain, nb);
}

static struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};

extern void netlink_skb_parms_too_large(void);

static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long max;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
		netlink_skb_parms_too_large();

	nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
	if (!nl_table) {
enomem:
		printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
		return -ENOMEM;
	}

	memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);

	if (num_physpages >= (128 * 1024))
		max = num_physpages >> (21 - PAGE_SHIFT);
	else
		max = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
	max = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto enomem;
		}
		memset(hash->table, 0, 1 * sizeof(*hash->table));
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
}

core_initcall(netlink_proto_init);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);