blob: 64334893c61cc2d0530afa2f28bff044102b183a [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * NETLINK Kernel-user communication protocol.
3 *
Alan Cox113aa832008-10-13 19:01:08 -07004 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
Patrick McHardycd1df522013-04-17 06:47:05 +00006 * Patrick McHardy <kaber@trash.net>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +090012 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
14 * added netlink_proto_exit
15 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
16 * use nlk_sk, as sk->protinfo is on a diet 8)
Harald Welte4fdb3bb2005-08-09 19:40:55 -070017 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
18 * - inc module use count of module that owns
19 * the kernel socket in case userspace opens
20 * socket of same protocol
21 * - remove all module support, since netlink is
22 * mandatory if CONFIG_NET=y these days
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 */
24
Linus Torvalds1da177e2005-04-16 15:20:36 -070025#include <linux/module.h>
26
Randy Dunlap4fc268d2006-01-11 12:17:47 -080027#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/kernel.h>
29#include <linux/init.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030#include <linux/signal.h>
31#include <linux/sched.h>
32#include <linux/errno.h>
33#include <linux/string.h>
34#include <linux/stat.h>
35#include <linux/socket.h>
36#include <linux/un.h>
37#include <linux/fcntl.h>
38#include <linux/termios.h>
39#include <linux/sockios.h>
40#include <linux/net.h>
41#include <linux/fs.h>
42#include <linux/slab.h>
43#include <asm/uaccess.h>
44#include <linux/skbuff.h>
45#include <linux/netdevice.h>
46#include <linux/rtnetlink.h>
47#include <linux/proc_fs.h>
48#include <linux/seq_file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <linux/notifier.h>
50#include <linux/security.h>
51#include <linux/jhash.h>
52#include <linux/jiffies.h>
53#include <linux/random.h>
54#include <linux/bitops.h>
55#include <linux/mm.h>
56#include <linux/types.h>
Andrew Morton54e0f522005-04-30 07:07:04 +010057#include <linux/audit.h>
Patrick McHardyaf65bdf2007-04-20 14:14:21 -070058#include <linux/mutex.h>
Patrick McHardyccdfcc32013-04-17 06:47:01 +000059#include <linux/vmalloc.h>
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +020060#include <linux/if_arp.h>
Patrick McHardy9652e932013-04-17 06:47:02 +000061#include <asm/cacheflush.h>
Andrew Morton54e0f522005-04-30 07:07:04 +010062
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020063#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070064#include <net/sock.h>
65#include <net/scm.h>
Thomas Graf82ace472005-11-10 02:25:53 +010066#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070067
Andrey Vagin0f29c762013-03-21 20:33:47 +040068#include "af_netlink.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070069
Eric Dumazet5c398dc2010-10-24 04:27:10 +000070struct listeners {
71 struct rcu_head rcu;
72 unsigned long masks[0];
Johannes Berg6c04bb12009-07-10 09:51:32 +000073};
74
Patrick McHardycd967e02013-04-17 06:46:56 +000075/* state bits */
76#define NETLINK_CONGESTED 0x0
77
78/* flags */
Patrick McHardy77247bb2005-08-14 19:27:13 -070079#define NETLINK_KERNEL_SOCKET 0x1
Patrick McHardy9a4595b2005-08-15 12:32:15 -070080#define NETLINK_RECV_PKTINFO 0x2
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +000081#define NETLINK_BROADCAST_SEND_ERROR 0x4
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -070082#define NETLINK_RECV_NO_ENOBUFS 0x8
Patrick McHardy77247bb2005-08-14 19:27:13 -070083
David S. Miller035c4c12011-12-23 17:33:03 -050084static inline int netlink_is_kernel(struct sock *sk)
Denis V. Lunevaed81562007-10-10 21:14:32 -070085{
86 return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
87}
88
Andrey Vagin0f29c762013-03-21 20:33:47 +040089struct netlink_table *nl_table;
90EXPORT_SYMBOL_GPL(nl_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -070091
92static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
93
94static int netlink_dump(struct sock *sk);
Patrick McHardy9652e932013-04-17 06:47:02 +000095static void netlink_skb_destructor(struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070096
Andrey Vagin0f29c762013-03-21 20:33:47 +040097DEFINE_RWLOCK(nl_table_lock);
98EXPORT_SYMBOL_GPL(nl_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070099static atomic_t nl_table_users = ATOMIC_INIT(0);
100
Eric Dumazet6d772ac2012-10-18 03:21:55 +0000101#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
102
Alan Sterne041c682006-03-27 01:16:30 -0800103static ATOMIC_NOTIFIER_HEAD(netlink_chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +0200105static DEFINE_SPINLOCK(netlink_tap_lock);
106static struct list_head netlink_tap_all __read_mostly;
107
stephen hemmingerb57ef81f2011-12-22 08:52:02 +0000108static inline u32 netlink_group_mask(u32 group)
Patrick McHardyd629b832005-08-14 19:27:50 -0700109{
110 return group ? 1 << (group - 1) : 0;
111}
112
Eric W. Biederman15e47302012-09-07 20:12:54 +0000113static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114{
Eric W. Biederman15e47302012-09-07 20:12:54 +0000115 return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116}
117
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +0200118int netlink_add_tap(struct netlink_tap *nt)
119{
120 if (unlikely(nt->dev->type != ARPHRD_NETLINK))
121 return -EINVAL;
122
123 spin_lock(&netlink_tap_lock);
124 list_add_rcu(&nt->list, &netlink_tap_all);
125 spin_unlock(&netlink_tap_lock);
126
127 if (nt->module)
128 __module_get(nt->module);
129
130 return 0;
131}
132EXPORT_SYMBOL_GPL(netlink_add_tap);
133
134int __netlink_remove_tap(struct netlink_tap *nt)
135{
136 bool found = false;
137 struct netlink_tap *tmp;
138
139 spin_lock(&netlink_tap_lock);
140
141 list_for_each_entry(tmp, &netlink_tap_all, list) {
142 if (nt == tmp) {
143 list_del_rcu(&nt->list);
144 found = true;
145 goto out;
146 }
147 }
148
149 pr_warn("__netlink_remove_tap: %p not found\n", nt);
150out:
151 spin_unlock(&netlink_tap_lock);
152
153 if (found && nt->module)
154 module_put(nt->module);
155
156 return found ? 0 : -ENODEV;
157}
158EXPORT_SYMBOL_GPL(__netlink_remove_tap);
159
160int netlink_remove_tap(struct netlink_tap *nt)
161{
162 int ret;
163
164 ret = __netlink_remove_tap(nt);
165 synchronize_net();
166
167 return ret;
168}
169EXPORT_SYMBOL_GPL(netlink_remove_tap);
170
Daniel Borkmann5ffd5cd2013-09-05 17:48:47 +0200171static bool netlink_filter_tap(const struct sk_buff *skb)
172{
173 struct sock *sk = skb->sk;
174 bool pass = false;
175
176 /* We take the more conservative approach and
177 * whitelist socket protocols that may pass.
178 */
179 switch (sk->sk_protocol) {
180 case NETLINK_ROUTE:
181 case NETLINK_USERSOCK:
182 case NETLINK_SOCK_DIAG:
183 case NETLINK_NFLOG:
184 case NETLINK_XFRM:
185 case NETLINK_FIB_LOOKUP:
186 case NETLINK_NETFILTER:
187 case NETLINK_GENERIC:
188 pass = true;
189 break;
190 }
191
192 return pass;
193}
194
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +0200195static int __netlink_deliver_tap_skb(struct sk_buff *skb,
196 struct net_device *dev)
197{
198 struct sk_buff *nskb;
Daniel Borkmann5ffd5cd2013-09-05 17:48:47 +0200199 struct sock *sk = skb->sk;
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +0200200 int ret = -ENOMEM;
201
202 dev_hold(dev);
203 nskb = skb_clone(skb, GFP_ATOMIC);
204 if (nskb) {
205 nskb->dev = dev;
Daniel Borkmann5ffd5cd2013-09-05 17:48:47 +0200206 nskb->protocol = htons((u16) sk->sk_protocol);
207
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +0200208 ret = dev_queue_xmit(nskb);
209 if (unlikely(ret > 0))
210 ret = net_xmit_errno(ret);
211 }
212
213 dev_put(dev);
214 return ret;
215}
216
217static void __netlink_deliver_tap(struct sk_buff *skb)
218{
219 int ret;
220 struct netlink_tap *tmp;
221
Daniel Borkmann5ffd5cd2013-09-05 17:48:47 +0200222 if (!netlink_filter_tap(skb))
223 return;
224
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +0200225 list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
226 ret = __netlink_deliver_tap_skb(skb, tmp->dev);
227 if (unlikely(ret))
228 break;
229 }
230}
231
232static void netlink_deliver_tap(struct sk_buff *skb)
233{
234 rcu_read_lock();
235
236 if (unlikely(!list_empty(&netlink_tap_all)))
237 __netlink_deliver_tap(skb);
238
239 rcu_read_unlock();
240}
241
Patrick McHardycd1df522013-04-17 06:47:05 +0000242static void netlink_overrun(struct sock *sk)
243{
244 struct netlink_sock *nlk = nlk_sk(sk);
245
246 if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
247 if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
248 sk->sk_err = ENOBUFS;
249 sk->sk_error_report(sk);
250 }
251 }
252 atomic_inc(&sk->sk_drops);
253}
254
255static void netlink_rcv_wake(struct sock *sk)
256{
257 struct netlink_sock *nlk = nlk_sk(sk);
258
259 if (skb_queue_empty(&sk->sk_receive_queue))
260 clear_bit(NETLINK_CONGESTED, &nlk->state);
261 if (!test_bit(NETLINK_CONGESTED, &nlk->state))
262 wake_up_interruptible(&nlk->wait);
263}
264
Patrick McHardyccdfcc32013-04-17 06:47:01 +0000265#ifdef CONFIG_NETLINK_MMAP
Patrick McHardy9652e932013-04-17 06:47:02 +0000266static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
267{
268 return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
269}
270
Patrick McHardyf9c22882013-04-17 06:47:04 +0000271static bool netlink_rx_is_mmaped(struct sock *sk)
272{
273 return nlk_sk(sk)->rx_ring.pg_vec != NULL;
274}
275
Patrick McHardy5fd96122013-04-17 06:47:03 +0000276static bool netlink_tx_is_mmaped(struct sock *sk)
277{
278 return nlk_sk(sk)->tx_ring.pg_vec != NULL;
279}
280
Patrick McHardyccdfcc32013-04-17 06:47:01 +0000281static __pure struct page *pgvec_to_page(const void *addr)
282{
283 if (is_vmalloc_addr(addr))
284 return vmalloc_to_page(addr);
285 else
286 return virt_to_page(addr);
287}
288
289static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
290{
291 unsigned int i;
292
293 for (i = 0; i < len; i++) {
294 if (pg_vec[i] != NULL) {
295 if (is_vmalloc_addr(pg_vec[i]))
296 vfree(pg_vec[i]);
297 else
298 free_pages((unsigned long)pg_vec[i], order);
299 }
300 }
301 kfree(pg_vec);
302}
303
304static void *alloc_one_pg_vec_page(unsigned long order)
305{
306 void *buffer;
307 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
308 __GFP_NOWARN | __GFP_NORETRY;
309
310 buffer = (void *)__get_free_pages(gfp_flags, order);
311 if (buffer != NULL)
312 return buffer;
313
314 buffer = vzalloc((1 << order) * PAGE_SIZE);
315 if (buffer != NULL)
316 return buffer;
317
318 gfp_flags &= ~__GFP_NORETRY;
319 return (void *)__get_free_pages(gfp_flags, order);
320}
321
322static void **alloc_pg_vec(struct netlink_sock *nlk,
323 struct nl_mmap_req *req, unsigned int order)
324{
325 unsigned int block_nr = req->nm_block_nr;
326 unsigned int i;
Daniel Borkmann8a849bb2013-08-02 17:32:39 +0200327 void **pg_vec;
Patrick McHardyccdfcc32013-04-17 06:47:01 +0000328
329 pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
330 if (pg_vec == NULL)
331 return NULL;
332
333 for (i = 0; i < block_nr; i++) {
Daniel Borkmann8a849bb2013-08-02 17:32:39 +0200334 pg_vec[i] = alloc_one_pg_vec_page(order);
Patrick McHardyccdfcc32013-04-17 06:47:01 +0000335 if (pg_vec[i] == NULL)
336 goto err1;
337 }
338
339 return pg_vec;
340err1:
341 free_pg_vec(pg_vec, order, block_nr);
342 return NULL;
343}
344
345static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
346 bool closing, bool tx_ring)
347{
348 struct netlink_sock *nlk = nlk_sk(sk);
349 struct netlink_ring *ring;
350 struct sk_buff_head *queue;
351 void **pg_vec = NULL;
352 unsigned int order = 0;
353 int err;
354
355 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
356 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
357
358 if (!closing) {
359 if (atomic_read(&nlk->mapped))
360 return -EBUSY;
361 if (atomic_read(&ring->pending))
362 return -EBUSY;
363 }
364
365 if (req->nm_block_nr) {
366 if (ring->pg_vec != NULL)
367 return -EBUSY;
368
369 if ((int)req->nm_block_size <= 0)
370 return -EINVAL;
371 if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
372 return -EINVAL;
373 if (req->nm_frame_size < NL_MMAP_HDRLEN)
374 return -EINVAL;
375 if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
376 return -EINVAL;
377
378 ring->frames_per_block = req->nm_block_size /
379 req->nm_frame_size;
380 if (ring->frames_per_block == 0)
381 return -EINVAL;
382 if (ring->frames_per_block * req->nm_block_nr !=
383 req->nm_frame_nr)
384 return -EINVAL;
385
386 order = get_order(req->nm_block_size);
387 pg_vec = alloc_pg_vec(nlk, req, order);
388 if (pg_vec == NULL)
389 return -ENOMEM;
390 } else {
391 if (req->nm_frame_nr)
392 return -EINVAL;
393 }
394
395 err = -EBUSY;
396 mutex_lock(&nlk->pg_vec_lock);
397 if (closing || atomic_read(&nlk->mapped) == 0) {
398 err = 0;
399 spin_lock_bh(&queue->lock);
400
401 ring->frame_max = req->nm_frame_nr - 1;
402 ring->head = 0;
403 ring->frame_size = req->nm_frame_size;
404 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
405
406 swap(ring->pg_vec_len, req->nm_block_nr);
407 swap(ring->pg_vec_order, order);
408 swap(ring->pg_vec, pg_vec);
409
410 __skb_queue_purge(queue);
411 spin_unlock_bh(&queue->lock);
412
413 WARN_ON(atomic_read(&nlk->mapped));
414 }
415 mutex_unlock(&nlk->pg_vec_lock);
416
417 if (pg_vec)
418 free_pg_vec(pg_vec, order, req->nm_block_nr);
419 return err;
420}
421
422static void netlink_mm_open(struct vm_area_struct *vma)
423{
424 struct file *file = vma->vm_file;
425 struct socket *sock = file->private_data;
426 struct sock *sk = sock->sk;
427
428 if (sk)
429 atomic_inc(&nlk_sk(sk)->mapped);
430}
431
432static void netlink_mm_close(struct vm_area_struct *vma)
433{
434 struct file *file = vma->vm_file;
435 struct socket *sock = file->private_data;
436 struct sock *sk = sock->sk;
437
438 if (sk)
439 atomic_dec(&nlk_sk(sk)->mapped);
440}
441
442static const struct vm_operations_struct netlink_mmap_ops = {
443 .open = netlink_mm_open,
444 .close = netlink_mm_close,
445};
446
447static int netlink_mmap(struct file *file, struct socket *sock,
448 struct vm_area_struct *vma)
449{
450 struct sock *sk = sock->sk;
451 struct netlink_sock *nlk = nlk_sk(sk);
452 struct netlink_ring *ring;
453 unsigned long start, size, expected;
454 unsigned int i;
455 int err = -EINVAL;
456
457 if (vma->vm_pgoff)
458 return -EINVAL;
459
460 mutex_lock(&nlk->pg_vec_lock);
461
462 expected = 0;
463 for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
464 if (ring->pg_vec == NULL)
465 continue;
466 expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
467 }
468
469 if (expected == 0)
470 goto out;
471
472 size = vma->vm_end - vma->vm_start;
473 if (size != expected)
474 goto out;
475
476 start = vma->vm_start;
477 for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
478 if (ring->pg_vec == NULL)
479 continue;
480
481 for (i = 0; i < ring->pg_vec_len; i++) {
482 struct page *page;
483 void *kaddr = ring->pg_vec[i];
484 unsigned int pg_num;
485
486 for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
487 page = pgvec_to_page(kaddr);
488 err = vm_insert_page(vma, start, page);
489 if (err < 0)
490 goto out;
491 start += PAGE_SIZE;
492 kaddr += PAGE_SIZE;
493 }
494 }
495 }
496
497 atomic_inc(&nlk->mapped);
498 vma->vm_ops = &netlink_mmap_ops;
499 err = 0;
500out:
501 mutex_unlock(&nlk->pg_vec_lock);
Patrick McHardy7cdbac72013-06-11 02:52:47 -0700502 return err;
Patrick McHardyccdfcc32013-04-17 06:47:01 +0000503}
Patrick McHardy9652e932013-04-17 06:47:02 +0000504
505static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
506{
507#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
508 struct page *p_start, *p_end;
509
510 /* First page is flushed through netlink_{get,set}_status */
511 p_start = pgvec_to_page(hdr + PAGE_SIZE);
Stephen Rothwell1d5085c2013-04-23 17:40:35 +1000512 p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
Patrick McHardy9652e932013-04-17 06:47:02 +0000513 while (p_start <= p_end) {
514 flush_dcache_page(p_start);
515 p_start++;
516 }
517#endif
518}
519
520static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
521{
522 smp_rmb();
523 flush_dcache_page(pgvec_to_page(hdr));
524 return hdr->nm_status;
525}
526
527static void netlink_set_status(struct nl_mmap_hdr *hdr,
528 enum nl_mmap_status status)
529{
530 hdr->nm_status = status;
531 flush_dcache_page(pgvec_to_page(hdr));
532 smp_wmb();
533}
534
535static struct nl_mmap_hdr *
536__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
537{
538 unsigned int pg_vec_pos, frame_off;
539
540 pg_vec_pos = pos / ring->frames_per_block;
541 frame_off = pos % ring->frames_per_block;
542
543 return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
544}
545
546static struct nl_mmap_hdr *
547netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
548 enum nl_mmap_status status)
549{
550 struct nl_mmap_hdr *hdr;
551
552 hdr = __netlink_lookup_frame(ring, pos);
553 if (netlink_get_status(hdr) != status)
554 return NULL;
555
556 return hdr;
557}
558
559static struct nl_mmap_hdr *
560netlink_current_frame(const struct netlink_ring *ring,
561 enum nl_mmap_status status)
562{
563 return netlink_lookup_frame(ring, ring->head, status);
564}
565
566static struct nl_mmap_hdr *
567netlink_previous_frame(const struct netlink_ring *ring,
568 enum nl_mmap_status status)
569{
570 unsigned int prev;
571
572 prev = ring->head ? ring->head - 1 : ring->frame_max;
573 return netlink_lookup_frame(ring, prev, status);
574}
575
576static void netlink_increment_head(struct netlink_ring *ring)
577{
578 ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
579}
580
581static void netlink_forward_ring(struct netlink_ring *ring)
582{
583 unsigned int head = ring->head, pos = head;
584 const struct nl_mmap_hdr *hdr;
585
586 do {
587 hdr = __netlink_lookup_frame(ring, pos);
588 if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
589 break;
590 if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
591 break;
592 netlink_increment_head(ring);
593 } while (ring->head != head);
594}
595
Patrick McHardycd1df522013-04-17 06:47:05 +0000596static bool netlink_dump_space(struct netlink_sock *nlk)
597{
598 struct netlink_ring *ring = &nlk->rx_ring;
599 struct nl_mmap_hdr *hdr;
600 unsigned int n;
601
602 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
603 if (hdr == NULL)
604 return false;
605
606 n = ring->head + ring->frame_max / 2;
607 if (n > ring->frame_max)
608 n -= ring->frame_max;
609
610 hdr = __netlink_lookup_frame(ring, n);
611
612 return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
613}
614
Patrick McHardy9652e932013-04-17 06:47:02 +0000615static unsigned int netlink_poll(struct file *file, struct socket *sock,
616 poll_table *wait)
617{
618 struct sock *sk = sock->sk;
619 struct netlink_sock *nlk = nlk_sk(sk);
620 unsigned int mask;
Patrick McHardycd1df522013-04-17 06:47:05 +0000621 int err;
Patrick McHardy9652e932013-04-17 06:47:02 +0000622
Patrick McHardycd1df522013-04-17 06:47:05 +0000623 if (nlk->rx_ring.pg_vec != NULL) {
624 /* Memory mapped sockets don't call recvmsg(), so flow control
625 * for dumps is performed here. A dump is allowed to continue
626 * if at least half the ring is unused.
627 */
Pravin B Shelar16b304f2013-08-15 15:31:06 -0700628 while (nlk->cb_running && netlink_dump_space(nlk)) {
Patrick McHardycd1df522013-04-17 06:47:05 +0000629 err = netlink_dump(sk);
630 if (err < 0) {
631 sk->sk_err = err;
632 sk->sk_error_report(sk);
633 break;
634 }
635 }
636 netlink_rcv_wake(sk);
637 }
Patrick McHardy5fd96122013-04-17 06:47:03 +0000638
Patrick McHardy9652e932013-04-17 06:47:02 +0000639 mask = datagram_poll(file, sock, wait);
640
641 spin_lock_bh(&sk->sk_receive_queue.lock);
642 if (nlk->rx_ring.pg_vec) {
643 netlink_forward_ring(&nlk->rx_ring);
644 if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
645 mask |= POLLIN | POLLRDNORM;
646 }
647 spin_unlock_bh(&sk->sk_receive_queue.lock);
648
649 spin_lock_bh(&sk->sk_write_queue.lock);
650 if (nlk->tx_ring.pg_vec) {
651 if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
652 mask |= POLLOUT | POLLWRNORM;
653 }
654 spin_unlock_bh(&sk->sk_write_queue.lock);
655
656 return mask;
657}
658
659static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
660{
661 return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
662}
663
664static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
665 struct netlink_ring *ring,
666 struct nl_mmap_hdr *hdr)
667{
668 unsigned int size;
669 void *data;
670
671 size = ring->frame_size - NL_MMAP_HDRLEN;
672 data = (void *)hdr + NL_MMAP_HDRLEN;
673
674 skb->head = data;
675 skb->data = data;
676 skb_reset_tail_pointer(skb);
677 skb->end = skb->tail + size;
678 skb->len = 0;
679
680 skb->destructor = netlink_skb_destructor;
681 NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
682 NETLINK_CB(skb).sk = sk;
683}
Patrick McHardy5fd96122013-04-17 06:47:03 +0000684
685static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
686 u32 dst_portid, u32 dst_group,
687 struct sock_iocb *siocb)
688{
689 struct netlink_sock *nlk = nlk_sk(sk);
690 struct netlink_ring *ring;
691 struct nl_mmap_hdr *hdr;
692 struct sk_buff *skb;
693 unsigned int maxlen;
694 bool excl = true;
695 int err = 0, len = 0;
696
697 /* Netlink messages are validated by the receiver before processing.
698 * In order to avoid userspace changing the contents of the message
699 * after validation, the socket and the ring may only be used by a
700 * single process, otherwise we fall back to copying.
701 */
702 if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
703 atomic_read(&nlk->mapped) > 1)
704 excl = false;
705
706 mutex_lock(&nlk->pg_vec_lock);
707
708 ring = &nlk->tx_ring;
709 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
710
711 do {
712 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
713 if (hdr == NULL) {
714 if (!(msg->msg_flags & MSG_DONTWAIT) &&
715 atomic_read(&nlk->tx_ring.pending))
716 schedule();
717 continue;
718 }
719 if (hdr->nm_len > maxlen) {
720 err = -EINVAL;
721 goto out;
722 }
723
724 netlink_frame_flush_dcache(hdr);
725
726 if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
727 skb = alloc_skb_head(GFP_KERNEL);
728 if (skb == NULL) {
729 err = -ENOBUFS;
730 goto out;
731 }
732 sock_hold(sk);
733 netlink_ring_setup_skb(skb, sk, ring, hdr);
734 NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
735 __skb_put(skb, hdr->nm_len);
736 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
737 atomic_inc(&ring->pending);
738 } else {
739 skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
740 if (skb == NULL) {
741 err = -ENOBUFS;
742 goto out;
743 }
744 __skb_put(skb, hdr->nm_len);
745 memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
746 netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
747 }
748
749 netlink_increment_head(ring);
750
751 NETLINK_CB(skb).portid = nlk->portid;
752 NETLINK_CB(skb).dst_group = dst_group;
753 NETLINK_CB(skb).creds = siocb->scm->creds;
754
755 err = security_netlink_send(sk, skb);
756 if (err) {
757 kfree_skb(skb);
758 goto out;
759 }
760
761 if (unlikely(dst_group)) {
762 atomic_inc(&skb->users);
763 netlink_broadcast(sk, skb, dst_portid, dst_group,
764 GFP_KERNEL);
765 }
766 err = netlink_unicast(sk, skb, dst_portid,
767 msg->msg_flags & MSG_DONTWAIT);
768 if (err < 0)
769 goto out;
770 len += err;
771
772 } while (hdr != NULL ||
773 (!(msg->msg_flags & MSG_DONTWAIT) &&
774 atomic_read(&nlk->tx_ring.pending)));
775
776 if (len > 0)
777 err = len;
778out:
779 mutex_unlock(&nlk->pg_vec_lock);
780 return err;
781}
Patrick McHardyf9c22882013-04-17 06:47:04 +0000782
783static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
784{
785 struct nl_mmap_hdr *hdr;
786
787 hdr = netlink_mmap_hdr(skb);
788 hdr->nm_len = skb->len;
789 hdr->nm_group = NETLINK_CB(skb).dst_group;
790 hdr->nm_pid = NETLINK_CB(skb).creds.pid;
Nicolas Dichtel1bf93102013-04-24 10:36:23 +0200791 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
792 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
Patrick McHardyf9c22882013-04-17 06:47:04 +0000793 netlink_frame_flush_dcache(hdr);
794 netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
795
796 NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
797 kfree_skb(skb);
798}
799
800static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
801{
802 struct netlink_sock *nlk = nlk_sk(sk);
803 struct netlink_ring *ring = &nlk->rx_ring;
804 struct nl_mmap_hdr *hdr;
805
806 spin_lock_bh(&sk->sk_receive_queue.lock);
807 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
808 if (hdr == NULL) {
809 spin_unlock_bh(&sk->sk_receive_queue.lock);
810 kfree_skb(skb);
Patrick McHardycd1df522013-04-17 06:47:05 +0000811 netlink_overrun(sk);
Patrick McHardyf9c22882013-04-17 06:47:04 +0000812 return;
813 }
814 netlink_increment_head(ring);
815 __skb_queue_tail(&sk->sk_receive_queue, skb);
816 spin_unlock_bh(&sk->sk_receive_queue.lock);
817
818 hdr->nm_len = skb->len;
819 hdr->nm_group = NETLINK_CB(skb).dst_group;
820 hdr->nm_pid = NETLINK_CB(skb).creds.pid;
Nicolas Dichtel1bf93102013-04-24 10:36:23 +0200821 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
822 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
Patrick McHardyf9c22882013-04-17 06:47:04 +0000823 netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
824}
825
Patrick McHardyccdfcc32013-04-17 06:47:01 +0000826#else /* CONFIG_NETLINK_MMAP */
Patrick McHardy9652e932013-04-17 06:47:02 +0000827#define netlink_skb_is_mmaped(skb) false
Patrick McHardyf9c22882013-04-17 06:47:04 +0000828#define netlink_rx_is_mmaped(sk) false
Patrick McHardy5fd96122013-04-17 06:47:03 +0000829#define netlink_tx_is_mmaped(sk) false
Patrick McHardyccdfcc32013-04-17 06:47:01 +0000830#define netlink_mmap sock_no_mmap
Patrick McHardy9652e932013-04-17 06:47:02 +0000831#define netlink_poll datagram_poll
Patrick McHardy5fd96122013-04-17 06:47:03 +0000832#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0
Patrick McHardyccdfcc32013-04-17 06:47:01 +0000833#endif /* CONFIG_NETLINK_MMAP */
834
Patrick McHardycf0a0182013-04-17 06:47:00 +0000835static void netlink_skb_destructor(struct sk_buff *skb)
836{
Patrick McHardy9652e932013-04-17 06:47:02 +0000837#ifdef CONFIG_NETLINK_MMAP
838 struct nl_mmap_hdr *hdr;
839 struct netlink_ring *ring;
840 struct sock *sk;
841
842 /* If a packet from the kernel to userspace was freed because of an
843 * error without being delivered to userspace, the kernel must reset
844 * the status. In the direction userspace to kernel, the status is
845 * always reset here after the packet was processed and freed.
846 */
847 if (netlink_skb_is_mmaped(skb)) {
848 hdr = netlink_mmap_hdr(skb);
849 sk = NETLINK_CB(skb).sk;
850
Patrick McHardy5fd96122013-04-17 06:47:03 +0000851 if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
852 netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
853 ring = &nlk_sk(sk)->tx_ring;
854 } else {
855 if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
856 hdr->nm_len = 0;
857 netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
858 }
859 ring = &nlk_sk(sk)->rx_ring;
Patrick McHardy9652e932013-04-17 06:47:02 +0000860 }
Patrick McHardy9652e932013-04-17 06:47:02 +0000861
862 WARN_ON(atomic_read(&ring->pending) == 0);
863 atomic_dec(&ring->pending);
864 sock_put(sk);
865
Pablo Neira5e71d9d2013-06-03 09:28:43 +0000866 skb->head = NULL;
Patrick McHardy9652e932013-04-17 06:47:02 +0000867 }
868#endif
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +0000869 if (is_vmalloc_addr(skb->head)) {
Pablo Neira3a365152013-06-28 03:04:23 +0200870 if (!skb->cloned ||
871 !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
872 vfree(skb->head);
873
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +0000874 skb->head = NULL;
875 }
Patrick McHardy9652e932013-04-17 06:47:02 +0000876 if (skb->sk != NULL)
877 sock_rfree(skb);
Patrick McHardycf0a0182013-04-17 06:47:00 +0000878}
879
880static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
881{
882 WARN_ON(skb->sk != NULL);
883 skb->sk = sk;
884 skb->destructor = netlink_skb_destructor;
885 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
886 sk_mem_charge(sk, skb->truesize);
887}
888
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889static void netlink_sock_destruct(struct sock *sk)
890{
Herbert Xu3f660d62007-05-03 03:17:14 -0700891 struct netlink_sock *nlk = nlk_sk(sk);
892
Pravin B Shelar16b304f2013-08-15 15:31:06 -0700893 if (nlk->cb_running) {
894 if (nlk->cb.done)
895 nlk->cb.done(&nlk->cb);
Gao feng6dc878a2012-10-04 20:15:48 +0000896
Pravin B Shelar16b304f2013-08-15 15:31:06 -0700897 module_put(nlk->cb.module);
898 kfree_skb(nlk->cb.skb);
Herbert Xu3f660d62007-05-03 03:17:14 -0700899 }
900
Linus Torvalds1da177e2005-04-16 15:20:36 -0700901 skb_queue_purge(&sk->sk_receive_queue);
Patrick McHardyccdfcc32013-04-17 06:47:01 +0000902#ifdef CONFIG_NETLINK_MMAP
903 if (1) {
904 struct nl_mmap_req req;
905
906 memset(&req, 0, sizeof(req));
907 if (nlk->rx_ring.pg_vec)
908 netlink_set_ring(sk, &req, true, false);
909 memset(&req, 0, sizeof(req));
910 if (nlk->tx_ring.pg_vec)
911 netlink_set_ring(sk, &req, true, true);
912 }
913#endif /* CONFIG_NETLINK_MMAP */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914
915 if (!sock_flag(sk, SOCK_DEAD)) {
Patrick McHardy6ac552f2007-12-04 00:19:38 -0800916 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917 return;
918 }
Ilpo Järvinen547b7922008-07-25 21:43:18 -0700919
920 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
921 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
922 WARN_ON(nlk_sk(sk)->groups);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700923}
924
Patrick McHardy6ac552f2007-12-04 00:19:38 -0800925/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
926 * SMP. Look, when several writers sleep and reader wakes them up, all but one
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
928 * this, _but_ remember, it adds useless work on UP machines.
929 */
930
Johannes Bergd136f1b2009-09-12 03:03:15 +0000931void netlink_table_grab(void)
Eric Dumazet9a429c42008-01-01 21:58:02 -0800932 __acquires(nl_table_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700933{
Johannes Bergd136f1b2009-09-12 03:03:15 +0000934 might_sleep();
935
Arjan van de Ven6abd2192006-07-03 00:24:07 -0700936 write_lock_irq(&nl_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937
938 if (atomic_read(&nl_table_users)) {
939 DECLARE_WAITQUEUE(wait, current);
940
941 add_wait_queue_exclusive(&nl_table_wait, &wait);
Patrick McHardy6ac552f2007-12-04 00:19:38 -0800942 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943 set_current_state(TASK_UNINTERRUPTIBLE);
944 if (atomic_read(&nl_table_users) == 0)
945 break;
Arjan van de Ven6abd2192006-07-03 00:24:07 -0700946 write_unlock_irq(&nl_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947 schedule();
Arjan van de Ven6abd2192006-07-03 00:24:07 -0700948 write_lock_irq(&nl_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700949 }
950
951 __set_current_state(TASK_RUNNING);
952 remove_wait_queue(&nl_table_wait, &wait);
953 }
954}
955
Johannes Bergd136f1b2009-09-12 03:03:15 +0000956void netlink_table_ungrab(void)
Eric Dumazet9a429c42008-01-01 21:58:02 -0800957 __releases(nl_table_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958{
Arjan van de Ven6abd2192006-07-03 00:24:07 -0700959 write_unlock_irq(&nl_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960 wake_up(&nl_table_wait);
961}
962
Patrick McHardy6ac552f2007-12-04 00:19:38 -0800963static inline void
Linus Torvalds1da177e2005-04-16 15:20:36 -0700964netlink_lock_table(void)
965{
966 /* read_lock() synchronizes us to netlink_table_grab */
967
968 read_lock(&nl_table_lock);
969 atomic_inc(&nl_table_users);
970 read_unlock(&nl_table_lock);
971}
972
Patrick McHardy6ac552f2007-12-04 00:19:38 -0800973static inline void
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974netlink_unlock_table(void)
975{
976 if (atomic_dec_and_test(&nl_table_users))
977 wake_up(&nl_table_wait);
978}
979
Gao fengda12c902013-06-06 14:49:11 +0800980static bool netlink_compare(struct net *net, struct sock *sk)
981{
982 return net_eq(sock_net(sk), net);
983}
984
Eric W. Biederman15e47302012-09-07 20:12:54 +0000985static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986{
Gao fengda12c902013-06-06 14:49:11 +0800987 struct netlink_table *table = &nl_table[protocol];
988 struct nl_portid_hash *hash = &table->hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700989 struct hlist_head *head;
990 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991
992 read_lock(&nl_table_lock);
Eric W. Biederman15e47302012-09-07 20:12:54 +0000993 head = nl_portid_hashfn(hash, portid);
Sasha Levinb67bfe02013-02-27 17:06:00 -0800994 sk_for_each(sk, head) {
Gao fengda12c902013-06-06 14:49:11 +0800995 if (table->compare(net, sk) &&
996 (nlk_sk(sk)->portid == portid)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997 sock_hold(sk);
998 goto found;
999 }
1000 }
1001 sk = NULL;
1002found:
1003 read_unlock(&nl_table_lock);
1004 return sk;
1005}
1006
Eric W. Biederman15e47302012-09-07 20:12:54 +00001007static struct hlist_head *nl_portid_hash_zalloc(size_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008{
1009 if (size <= PAGE_SIZE)
Eric Dumazetea729122007-12-11 02:09:47 -08001010 return kzalloc(size, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011 else
1012 return (struct hlist_head *)
Eric Dumazetea729122007-12-11 02:09:47 -08001013 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
1014 get_order(size));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015}
1016
Eric W. Biederman15e47302012-09-07 20:12:54 +00001017static void nl_portid_hash_free(struct hlist_head *table, size_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018{
1019 if (size <= PAGE_SIZE)
1020 kfree(table);
1021 else
1022 free_pages((unsigned long)table, get_order(size));
1023}
1024
Eric W. Biederman15e47302012-09-07 20:12:54 +00001025static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026{
1027 unsigned int omask, mask, shift;
1028 size_t osize, size;
1029 struct hlist_head *otable, *table;
1030 int i;
1031
1032 omask = mask = hash->mask;
1033 osize = size = (mask + 1) * sizeof(*table);
1034 shift = hash->shift;
1035
1036 if (grow) {
1037 if (++shift > hash->max_shift)
1038 return 0;
1039 mask = mask * 2 + 1;
1040 size *= 2;
1041 }
1042
Eric W. Biederman15e47302012-09-07 20:12:54 +00001043 table = nl_portid_hash_zalloc(size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044 if (!table)
1045 return 0;
1046
Linus Torvalds1da177e2005-04-16 15:20:36 -07001047 otable = hash->table;
1048 hash->table = table;
1049 hash->mask = mask;
1050 hash->shift = shift;
1051 get_random_bytes(&hash->rnd, sizeof(hash->rnd));
1052
1053 for (i = 0; i <= omask; i++) {
1054 struct sock *sk;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001055 struct hlist_node *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001056
Sasha Levinb67bfe02013-02-27 17:06:00 -08001057 sk_for_each_safe(sk, tmp, &otable[i])
Eric W. Biederman15e47302012-09-07 20:12:54 +00001058 __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059 }
1060
Eric W. Biederman15e47302012-09-07 20:12:54 +00001061 nl_portid_hash_free(otable, osize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001062 hash->rehash_time = jiffies + 10 * 60 * HZ;
1063 return 1;
1064}
1065
Eric W. Biederman15e47302012-09-07 20:12:54 +00001066static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067{
1068 int avg = hash->entries >> hash->shift;
1069
Eric W. Biederman15e47302012-09-07 20:12:54 +00001070 if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 return 1;
1072
1073 if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001074 nl_portid_hash_rehash(hash, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075 return 1;
1076 }
1077
1078 return 0;
1079}
1080
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001081static const struct proto_ops netlink_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082
Patrick McHardy4277a082006-03-20 18:52:01 -08001083static void
1084netlink_update_listeners(struct sock *sk)
1085{
1086 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
Patrick McHardy4277a082006-03-20 18:52:01 -08001087 unsigned long mask;
1088 unsigned int i;
Eric Dumazet6d772ac2012-10-18 03:21:55 +00001089 struct listeners *listeners;
1090
1091 listeners = nl_deref_protected(tbl->listeners);
1092 if (!listeners)
1093 return;
Patrick McHardy4277a082006-03-20 18:52:01 -08001094
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001095 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
Patrick McHardy4277a082006-03-20 18:52:01 -08001096 mask = 0;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001097 sk_for_each_bound(sk, &tbl->mc_list) {
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001098 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
1099 mask |= nlk_sk(sk)->groups[i];
1100 }
Eric Dumazet6d772ac2012-10-18 03:21:55 +00001101 listeners->masks[i] = mask;
Patrick McHardy4277a082006-03-20 18:52:01 -08001102 }
1103 /* this function is only called with the netlink table "grabbed", which
1104 * makes sure updates are visible before bind or setsockopt return. */
1105}
1106
Eric W. Biederman15e47302012-09-07 20:12:54 +00001107static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108{
Gao fengda12c902013-06-06 14:49:11 +08001109 struct netlink_table *table = &nl_table[sk->sk_protocol];
1110 struct nl_portid_hash *hash = &table->hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111 struct hlist_head *head;
1112 int err = -EADDRINUSE;
1113 struct sock *osk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114 int len;
1115
1116 netlink_table_grab();
Eric W. Biederman15e47302012-09-07 20:12:54 +00001117 head = nl_portid_hashfn(hash, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118 len = 0;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001119 sk_for_each(osk, head) {
Gao fengda12c902013-06-06 14:49:11 +08001120 if (table->compare(net, osk) &&
1121 (nlk_sk(osk)->portid == portid))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122 break;
1123 len++;
1124 }
Sasha Levinb67bfe02013-02-27 17:06:00 -08001125 if (osk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126 goto err;
1127
1128 err = -EBUSY;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001129 if (nlk_sk(sk)->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 goto err;
1131
1132 err = -ENOMEM;
1133 if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
1134 goto err;
1135
Eric W. Biederman15e47302012-09-07 20:12:54 +00001136 if (len && nl_portid_hash_dilute(hash, len))
1137 head = nl_portid_hashfn(hash, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138 hash->entries++;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001139 nlk_sk(sk)->portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140 sk_add_node(sk, head);
1141 err = 0;
1142
1143err:
1144 netlink_table_ungrab();
1145 return err;
1146}
1147
1148static void netlink_remove(struct sock *sk)
1149{
1150 netlink_table_grab();
David S. Millerd470e3b2005-06-26 15:31:51 -07001151 if (sk_del_node_init(sk))
1152 nl_table[sk->sk_protocol].hash.entries--;
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001153 if (nlk_sk(sk)->subscriptions)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 __sk_del_bind_node(sk);
1155 netlink_table_ungrab();
1156}
1157
1158static struct proto netlink_proto = {
1159 .name = "NETLINK",
1160 .owner = THIS_MODULE,
1161 .obj_size = sizeof(struct netlink_sock),
1162};
1163
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -07001164static int __netlink_create(struct net *net, struct socket *sock,
1165 struct mutex *cb_mutex, int protocol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166{
1167 struct sock *sk;
1168 struct netlink_sock *nlk;
Patrick McHardyab33a172005-08-14 19:31:36 -07001169
1170 sock->ops = &netlink_ops;
1171
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001172 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
Patrick McHardyab33a172005-08-14 19:31:36 -07001173 if (!sk)
1174 return -ENOMEM;
1175
1176 sock_init_data(sock, sk);
1177
1178 nlk = nlk_sk(sk);
Eric Dumazet658cb352012-04-22 21:30:21 +00001179 if (cb_mutex) {
Patrick McHardyffa4d722007-04-25 14:01:17 -07001180 nlk->cb_mutex = cb_mutex;
Eric Dumazet658cb352012-04-22 21:30:21 +00001181 } else {
Patrick McHardyffa4d722007-04-25 14:01:17 -07001182 nlk->cb_mutex = &nlk->cb_def_mutex;
1183 mutex_init(nlk->cb_mutex);
1184 }
Patrick McHardyab33a172005-08-14 19:31:36 -07001185 init_waitqueue_head(&nlk->wait);
Patrick McHardyccdfcc32013-04-17 06:47:01 +00001186#ifdef CONFIG_NETLINK_MMAP
1187 mutex_init(&nlk->pg_vec_lock);
1188#endif
Patrick McHardyab33a172005-08-14 19:31:36 -07001189
1190 sk->sk_destruct = netlink_sock_destruct;
1191 sk->sk_protocol = protocol;
1192 return 0;
1193}
1194
Eric Paris3f378b62009-11-05 22:18:14 -08001195static int netlink_create(struct net *net, struct socket *sock, int protocol,
1196 int kern)
Patrick McHardyab33a172005-08-14 19:31:36 -07001197{
1198 struct module *module = NULL;
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07001199 struct mutex *cb_mutex;
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001200 struct netlink_sock *nlk;
Pablo Neira Ayuso03292742012-06-29 06:15:22 +00001201 void (*bind)(int group);
Patrick McHardyab33a172005-08-14 19:31:36 -07001202 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203
1204 sock->state = SS_UNCONNECTED;
1205
1206 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
1207 return -ESOCKTNOSUPPORT;
1208
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001209 if (protocol < 0 || protocol >= MAX_LINKS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 return -EPROTONOSUPPORT;
1211
Patrick McHardy77247bb2005-08-14 19:27:13 -07001212 netlink_lock_table();
Johannes Berg95a5afc2008-10-16 15:24:51 -07001213#ifdef CONFIG_MODULES
Patrick McHardyab33a172005-08-14 19:31:36 -07001214 if (!nl_table[protocol].registered) {
Patrick McHardy77247bb2005-08-14 19:27:13 -07001215 netlink_unlock_table();
Harald Welte4fdb3bb2005-08-09 19:40:55 -07001216 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
Patrick McHardy77247bb2005-08-14 19:27:13 -07001217 netlink_lock_table();
Harald Welte4fdb3bb2005-08-09 19:40:55 -07001218 }
Patrick McHardyab33a172005-08-14 19:31:36 -07001219#endif
1220 if (nl_table[protocol].registered &&
1221 try_module_get(nl_table[protocol].module))
1222 module = nl_table[protocol].module;
Alexey Dobriyan974c37e2010-01-30 10:05:05 +00001223 else
1224 err = -EPROTONOSUPPORT;
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07001225 cb_mutex = nl_table[protocol].cb_mutex;
Pablo Neira Ayuso03292742012-06-29 06:15:22 +00001226 bind = nl_table[protocol].bind;
Patrick McHardy77247bb2005-08-14 19:27:13 -07001227 netlink_unlock_table();
Harald Welte4fdb3bb2005-08-09 19:40:55 -07001228
Alexey Dobriyan974c37e2010-01-30 10:05:05 +00001229 if (err < 0)
1230 goto out;
1231
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001232 err = __netlink_create(net, sock, cb_mutex, protocol);
1233 if (err < 0)
Patrick McHardyab33a172005-08-14 19:31:36 -07001234 goto out_module;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235
David S. Miller6f756a82008-11-23 17:34:03 -08001236 local_bh_disable();
Eric Dumazetc1fd3b92008-11-23 15:48:22 -08001237 sock_prot_inuse_add(net, &netlink_proto, 1);
David S. Miller6f756a82008-11-23 17:34:03 -08001238 local_bh_enable();
1239
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001240 nlk = nlk_sk(sock->sk);
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001241 nlk->module = module;
Pablo Neira Ayuso03292742012-06-29 06:15:22 +00001242 nlk->netlink_bind = bind;
Patrick McHardyab33a172005-08-14 19:31:36 -07001243out:
1244 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245
Patrick McHardyab33a172005-08-14 19:31:36 -07001246out_module:
1247 module_put(module);
1248 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249}
1250
1251static int netlink_release(struct socket *sock)
1252{
1253 struct sock *sk = sock->sk;
1254 struct netlink_sock *nlk;
1255
1256 if (!sk)
1257 return 0;
1258
1259 netlink_remove(sk);
Denis Lunevac57b3a2007-04-18 17:05:58 -07001260 sock_orphan(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 nlk = nlk_sk(sk);
1262
Herbert Xu3f660d62007-05-03 03:17:14 -07001263 /*
1264 * OK. Socket is unlinked, any packets that arrive now
1265 * will be purged.
1266 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268 sock->sk = NULL;
1269 wake_up_interruptible_all(&nlk->wait);
1270
1271 skb_queue_purge(&sk->sk_write_queue);
1272
Eric W. Biederman15e47302012-09-07 20:12:54 +00001273 if (nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274 struct netlink_notify n = {
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001275 .net = sock_net(sk),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276 .protocol = sk->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00001277 .portid = nlk->portid,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 };
Alan Sterne041c682006-03-27 01:16:30 -08001279 atomic_notifier_call_chain(&netlink_chain,
1280 NETLINK_URELEASE, &n);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001281 }
Harald Welte4fdb3bb2005-08-09 19:40:55 -07001282
Mariusz Kozlowski5e7c0012007-01-02 15:24:30 -08001283 module_put(nlk->module);
Harald Welte4fdb3bb2005-08-09 19:40:55 -07001284
Patrick McHardy4277a082006-03-20 18:52:01 -08001285 netlink_table_grab();
Denis V. Lunevaed81562007-10-10 21:14:32 -07001286 if (netlink_is_kernel(sk)) {
Denis V. Lunev869e58f2008-01-18 23:53:31 -08001287 BUG_ON(nl_table[sk->sk_protocol].registered == 0);
1288 if (--nl_table[sk->sk_protocol].registered == 0) {
Eric Dumazet6d772ac2012-10-18 03:21:55 +00001289 struct listeners *old;
1290
1291 old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
1292 RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
1293 kfree_rcu(old, rcu);
Denis V. Lunev869e58f2008-01-18 23:53:31 -08001294 nl_table[sk->sk_protocol].module = NULL;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00001295 nl_table[sk->sk_protocol].bind = NULL;
1296 nl_table[sk->sk_protocol].flags = 0;
Denis V. Lunev869e58f2008-01-18 23:53:31 -08001297 nl_table[sk->sk_protocol].registered = 0;
1298 }
Eric Dumazet658cb352012-04-22 21:30:21 +00001299 } else if (nlk->subscriptions) {
Patrick McHardy4277a082006-03-20 18:52:01 -08001300 netlink_update_listeners(sk);
Eric Dumazet658cb352012-04-22 21:30:21 +00001301 }
Patrick McHardy4277a082006-03-20 18:52:01 -08001302 netlink_table_ungrab();
Patrick McHardy77247bb2005-08-14 19:27:13 -07001303
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001304 kfree(nlk->groups);
1305 nlk->groups = NULL;
1306
Eric Dumazet37558102008-11-24 14:05:22 -08001307 local_bh_disable();
Eric Dumazetc1fd3b92008-11-23 15:48:22 -08001308 sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
Eric Dumazet37558102008-11-24 14:05:22 -08001309 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 sock_put(sk);
1311 return 0;
1312}
1313
1314static int netlink_autobind(struct socket *sock)
1315{
1316 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001317 struct net *net = sock_net(sk);
Gao fengda12c902013-06-06 14:49:11 +08001318 struct netlink_table *table = &nl_table[sk->sk_protocol];
1319 struct nl_portid_hash *hash = &table->hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 struct hlist_head *head;
1321 struct sock *osk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001322 s32 portid = task_tgid_vnr(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 int err;
1324 static s32 rover = -4097;
1325
1326retry:
1327 cond_resched();
1328 netlink_table_grab();
Eric W. Biederman15e47302012-09-07 20:12:54 +00001329 head = nl_portid_hashfn(hash, portid);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001330 sk_for_each(osk, head) {
Gao fengda12c902013-06-06 14:49:11 +08001331 if (!table->compare(net, osk))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001332 continue;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001333 if (nlk_sk(osk)->portid == portid) {
1334 /* Bind collision, search negative portid values. */
1335 portid = rover--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 if (rover > -4097)
1337 rover = -4097;
1338 netlink_table_ungrab();
1339 goto retry;
1340 }
1341 }
1342 netlink_table_ungrab();
1343
Eric W. Biederman15e47302012-09-07 20:12:54 +00001344 err = netlink_insert(sk, net, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345 if (err == -EADDRINUSE)
1346 goto retry;
David S. Millerd470e3b2005-06-26 15:31:51 -07001347
1348 /* If 2 threads race to autobind, that is fine. */
1349 if (err == -EBUSY)
1350 err = 0;
1351
1352 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353}
1354
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001355static inline int netlink_capable(const struct socket *sock, unsigned int flag)
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001356{
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00001357 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
Eric W. Biedermandf008c92012-11-16 03:03:07 +00001358 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001359}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001361static void
1362netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1363{
1364 struct netlink_sock *nlk = nlk_sk(sk);
1365
1366 if (nlk->subscriptions && !subscriptions)
1367 __sk_del_bind_node(sk);
1368 else if (!nlk->subscriptions && subscriptions)
1369 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1370 nlk->subscriptions = subscriptions;
1371}
1372
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001373static int netlink_realloc_groups(struct sock *sk)
Patrick McHardy513c2502005-09-06 15:43:59 -07001374{
1375 struct netlink_sock *nlk = nlk_sk(sk);
1376 unsigned int groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001377 unsigned long *new_groups;
Patrick McHardy513c2502005-09-06 15:43:59 -07001378 int err = 0;
1379
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001380 netlink_table_grab();
1381
Patrick McHardy513c2502005-09-06 15:43:59 -07001382 groups = nl_table[sk->sk_protocol].groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001383 if (!nl_table[sk->sk_protocol].registered) {
Patrick McHardy513c2502005-09-06 15:43:59 -07001384 err = -ENOENT;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001385 goto out_unlock;
1386 }
Patrick McHardy513c2502005-09-06 15:43:59 -07001387
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001388 if (nlk->ngroups >= groups)
1389 goto out_unlock;
Patrick McHardy513c2502005-09-06 15:43:59 -07001390
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001391 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1392 if (new_groups == NULL) {
1393 err = -ENOMEM;
1394 goto out_unlock;
1395 }
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001396 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001397 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1398
1399 nlk->groups = new_groups;
Patrick McHardy513c2502005-09-06 15:43:59 -07001400 nlk->ngroups = groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001401 out_unlock:
1402 netlink_table_ungrab();
1403 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001404}
1405
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001406static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1407 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408{
1409 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001410 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 struct netlink_sock *nlk = nlk_sk(sk);
1412 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1413 int err;
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001414
Hannes Frederic Sowa4e4b5372012-12-15 15:42:19 +00001415 if (addr_len < sizeof(struct sockaddr_nl))
1416 return -EINVAL;
1417
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 if (nladdr->nl_family != AF_NETLINK)
1419 return -EINVAL;
1420
1421 /* Only superuser is allowed to listen multicasts */
Patrick McHardy513c2502005-09-06 15:43:59 -07001422 if (nladdr->nl_groups) {
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00001423 if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy513c2502005-09-06 15:43:59 -07001424 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001425 err = netlink_realloc_groups(sk);
1426 if (err)
1427 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001428 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
Eric W. Biederman15e47302012-09-07 20:12:54 +00001430 if (nlk->portid) {
1431 if (nladdr->nl_pid != nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 return -EINVAL;
1433 } else {
1434 err = nladdr->nl_pid ?
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001435 netlink_insert(sk, net, nladdr->nl_pid) :
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 netlink_autobind(sock);
1437 if (err)
1438 return err;
1439 }
1440
Patrick McHardy513c2502005-09-06 15:43:59 -07001441 if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 return 0;
1443
1444 netlink_table_grab();
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001445 netlink_update_subscriptions(sk, nlk->subscriptions +
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001446 hweight32(nladdr->nl_groups) -
1447 hweight32(nlk->groups[0]));
1448 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08001449 netlink_update_listeners(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 netlink_table_ungrab();
1451
Pablo Neira Ayuso03292742012-06-29 06:15:22 +00001452 if (nlk->netlink_bind && nlk->groups[0]) {
1453 int i;
1454
1455 for (i=0; i<nlk->ngroups; i++) {
1456 if (test_bit(i, nlk->groups))
1457 nlk->netlink_bind(i);
1458 }
1459 }
1460
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 return 0;
1462}
1463
1464static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1465 int alen, int flags)
1466{
1467 int err = 0;
1468 struct sock *sk = sock->sk;
1469 struct netlink_sock *nlk = nlk_sk(sk);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001470 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471
Changli Gao6503d962010-03-31 22:58:26 +00001472 if (alen < sizeof(addr->sa_family))
1473 return -EINVAL;
1474
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 if (addr->sa_family == AF_UNSPEC) {
1476 sk->sk_state = NETLINK_UNCONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001477 nlk->dst_portid = 0;
Patrick McHardyd629b832005-08-14 19:27:50 -07001478 nlk->dst_group = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 return 0;
1480 }
1481 if (addr->sa_family != AF_NETLINK)
1482 return -EINVAL;
1483
1484 /* Only superuser is allowed to send multicasts */
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00001485 if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 return -EPERM;
1487
Eric W. Biederman15e47302012-09-07 20:12:54 +00001488 if (!nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 err = netlink_autobind(sock);
1490
1491 if (err == 0) {
1492 sk->sk_state = NETLINK_CONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001493 nlk->dst_portid = nladdr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001494 nlk->dst_group = ffs(nladdr->nl_groups);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 }
1496
1497 return err;
1498}
1499
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001500static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1501 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502{
1503 struct sock *sk = sock->sk;
1504 struct netlink_sock *nlk = nlk_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00001505 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001506
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 nladdr->nl_family = AF_NETLINK;
1508 nladdr->nl_pad = 0;
1509 *addr_len = sizeof(*nladdr);
1510
1511 if (peer) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001512 nladdr->nl_pid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001513 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001515 nladdr->nl_pid = nlk->portid;
Patrick McHardy513c2502005-09-06 15:43:59 -07001516 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 }
1518 return 0;
1519}
1520
Eric W. Biederman15e47302012-09-07 20:12:54 +00001521static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 struct sock *sock;
1524 struct netlink_sock *nlk;
1525
Eric W. Biederman15e47302012-09-07 20:12:54 +00001526 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 if (!sock)
1528 return ERR_PTR(-ECONNREFUSED);
1529
1530 /* Don't bother queuing skb if kernel socket has no input function */
1531 nlk = nlk_sk(sock);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001532 if (sock->sk_state == NETLINK_CONNECTED &&
Eric W. Biederman15e47302012-09-07 20:12:54 +00001533 nlk->dst_portid != nlk_sk(ssk)->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 sock_put(sock);
1535 return ERR_PTR(-ECONNREFUSED);
1536 }
1537 return sock;
1538}
1539
1540struct sock *netlink_getsockbyfilp(struct file *filp)
1541{
Al Viro496ad9a2013-01-23 17:07:38 -05001542 struct inode *inode = file_inode(filp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 struct sock *sock;
1544
1545 if (!S_ISSOCK(inode->i_mode))
1546 return ERR_PTR(-ENOTSOCK);
1547
1548 sock = SOCKET_I(inode)->sk;
1549 if (sock->sk_family != AF_NETLINK)
1550 return ERR_PTR(-EINVAL);
1551
1552 sock_hold(sock);
1553 return sock;
1554}
1555
Pablo Neira3a365152013-06-28 03:04:23 +02001556static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1557 int broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001558{
1559 struct sk_buff *skb;
1560 void *data;
1561
Pablo Neira3a365152013-06-28 03:04:23 +02001562 if (size <= NLMSG_GOODSIZE || broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001563 return alloc_skb(size, GFP_KERNEL);
1564
Pablo Neira3a365152013-06-28 03:04:23 +02001565 size = SKB_DATA_ALIGN(size) +
1566 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001567
1568 data = vmalloc(size);
1569 if (data == NULL)
Pablo Neira3a365152013-06-28 03:04:23 +02001570 return NULL;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001571
Pablo Neira3a365152013-06-28 03:04:23 +02001572 skb = build_skb(data, size);
1573 if (skb == NULL)
1574 vfree(data);
1575 else {
1576 skb->head_frag = 0;
1577 skb->destructor = netlink_skb_destructor;
1578 }
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001579
1580 return skb;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001581}
1582
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583/*
1584 * Attach a skb to a netlink socket.
1585 * The caller must hold a reference to the destination socket. On error, the
1586 * reference is dropped. The skb is not send to the destination, just all
1587 * all error checks are performed and memory in the queue is reserved.
1588 * Return values:
1589 * < 0: error. skb freed, reference to sock dropped.
1590 * 0: continue
1591 * 1: repeat lookup - reference dropped while waiting for socket memory.
1592 */
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001593int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001594 long *timeo, struct sock *ssk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595{
1596 struct netlink_sock *nlk;
1597
1598 nlk = nlk_sk(sk);
1599
Patrick McHardy5fd96122013-04-17 06:47:03 +00001600 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1601 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1602 !netlink_skb_is_mmaped(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 DECLARE_WAITQUEUE(wait, current);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001604 if (!*timeo) {
Denis V. Lunevaed81562007-10-10 21:14:32 -07001605 if (!ssk || netlink_is_kernel(ssk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606 netlink_overrun(sk);
1607 sock_put(sk);
1608 kfree_skb(skb);
1609 return -EAGAIN;
1610 }
1611
1612 __set_current_state(TASK_INTERRUPTIBLE);
1613 add_wait_queue(&nlk->wait, &wait);
1614
1615 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
Patrick McHardycd967e02013-04-17 06:46:56 +00001616 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 !sock_flag(sk, SOCK_DEAD))
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001618 *timeo = schedule_timeout(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619
1620 __set_current_state(TASK_RUNNING);
1621 remove_wait_queue(&nlk->wait, &wait);
1622 sock_put(sk);
1623
1624 if (signal_pending(current)) {
1625 kfree_skb(skb);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001626 return sock_intr_errno(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 }
1628 return 1;
1629 }
Patrick McHardycf0a0182013-04-17 06:47:00 +00001630 netlink_skb_set_owner_r(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 return 0;
1632}
1633
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001634static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636 int len = skb->len;
1637
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02001638 netlink_deliver_tap(skb);
1639
Patrick McHardyf9c22882013-04-17 06:47:04 +00001640#ifdef CONFIG_NETLINK_MMAP
1641 if (netlink_skb_is_mmaped(skb))
1642 netlink_queue_mmaped_skb(sk, skb);
1643 else if (netlink_rx_is_mmaped(sk))
1644 netlink_ring_set_copied(sk, skb);
1645 else
1646#endif /* CONFIG_NETLINK_MMAP */
1647 skb_queue_tail(&sk->sk_receive_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 sk->sk_data_ready(sk, len);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001649 return len;
1650}
1651
1652int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1653{
1654 int len = __netlink_sendskb(sk, skb);
1655
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 sock_put(sk);
1657 return len;
1658}
1659
1660void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1661{
1662 kfree_skb(skb);
1663 sock_put(sk);
1664}
1665
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001666static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667{
1668 int delta;
1669
Patrick McHardy1298ca42013-04-17 06:46:59 +00001670 WARN_ON(skb->sk != NULL);
Patrick McHardy5fd96122013-04-17 06:47:03 +00001671 if (netlink_skb_is_mmaped(skb))
1672 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001674 delta = skb->end - skb->tail;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001675 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 return skb;
1677
1678 if (skb_shared(skb)) {
1679 struct sk_buff *nskb = skb_clone(skb, allocation);
1680 if (!nskb)
1681 return skb;
Eric Dumazet8460c002012-04-19 02:24:28 +00001682 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 skb = nskb;
1684 }
1685
1686 if (!pskb_expand_head(skb, 0, -delta, allocation))
1687 skb->truesize -= delta;
1688
1689 return skb;
1690}
1691
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001692static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1693 struct sock *ssk)
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001694{
1695 int ret;
1696 struct netlink_sock *nlk = nlk_sk(sk);
1697
1698 ret = -ECONNREFUSED;
1699 if (nlk->netlink_rcv != NULL) {
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02001700 /* We could do a netlink_deliver_tap(skb) here as well
1701 * but since this is intended for the kernel only, we
1702 * should rather let it stay under the hood.
1703 */
1704
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001705 ret = skb->len;
Patrick McHardycf0a0182013-04-17 06:47:00 +00001706 netlink_skb_set_owner_r(skb, sk);
Patrick McHardye32123e2013-04-17 06:46:57 +00001707 NETLINK_CB(skb).sk = ssk;
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001708 nlk->netlink_rcv(skb);
Eric Dumazetbfb253c2012-04-22 21:30:29 +00001709 consume_skb(skb);
1710 } else {
1711 kfree_skb(skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001712 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001713 sock_put(sk);
1714 return ret;
1715}
1716
1717int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
Eric W. Biederman15e47302012-09-07 20:12:54 +00001718 u32 portid, int nonblock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719{
1720 struct sock *sk;
1721 int err;
1722 long timeo;
1723
1724 skb = netlink_trim(skb, gfp_any());
1725
1726 timeo = sock_sndtimeo(ssk, nonblock);
1727retry:
Eric W. Biederman15e47302012-09-07 20:12:54 +00001728 sk = netlink_getsockbyportid(ssk, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 if (IS_ERR(sk)) {
1730 kfree_skb(skb);
1731 return PTR_ERR(sk);
1732 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001733 if (netlink_is_kernel(sk))
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001734 return netlink_unicast_kernel(sk, skb, ssk);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001735
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001736 if (sk_filter(sk, skb)) {
Wang Chen84874602008-07-01 19:55:09 -07001737 err = skb->len;
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001738 kfree_skb(skb);
1739 sock_put(sk);
1740 return err;
1741 }
1742
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001743 err = netlink_attachskb(sk, skb, &timeo, ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 if (err == 1)
1745 goto retry;
1746 if (err)
1747 return err;
1748
Denis V. Lunev7ee015e2007-10-10 21:14:03 -07001749 return netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001751EXPORT_SYMBOL(netlink_unicast);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
Patrick McHardyf9c22882013-04-17 06:47:04 +00001753struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1754 u32 dst_portid, gfp_t gfp_mask)
1755{
1756#ifdef CONFIG_NETLINK_MMAP
1757 struct sock *sk = NULL;
1758 struct sk_buff *skb;
1759 struct netlink_ring *ring;
1760 struct nl_mmap_hdr *hdr;
1761 unsigned int maxlen;
1762
1763 sk = netlink_getsockbyportid(ssk, dst_portid);
1764 if (IS_ERR(sk))
1765 goto out;
1766
1767 ring = &nlk_sk(sk)->rx_ring;
1768 /* fast-path without atomic ops for common case: non-mmaped receiver */
1769 if (ring->pg_vec == NULL)
1770 goto out_put;
1771
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001772 if (ring->frame_size - NL_MMAP_HDRLEN < size)
1773 goto out_put;
1774
Patrick McHardyf9c22882013-04-17 06:47:04 +00001775 skb = alloc_skb_head(gfp_mask);
1776 if (skb == NULL)
1777 goto err1;
1778
1779 spin_lock_bh(&sk->sk_receive_queue.lock);
1780 /* check again under lock */
1781 if (ring->pg_vec == NULL)
1782 goto out_free;
1783
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001784 /* check again under lock */
Patrick McHardyf9c22882013-04-17 06:47:04 +00001785 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1786 if (maxlen < size)
1787 goto out_free;
1788
1789 netlink_forward_ring(ring);
1790 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1791 if (hdr == NULL)
1792 goto err2;
1793 netlink_ring_setup_skb(skb, sk, ring, hdr);
1794 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1795 atomic_inc(&ring->pending);
1796 netlink_increment_head(ring);
1797
1798 spin_unlock_bh(&sk->sk_receive_queue.lock);
1799 return skb;
1800
1801err2:
1802 kfree_skb(skb);
1803 spin_unlock_bh(&sk->sk_receive_queue.lock);
Patrick McHardycd1df522013-04-17 06:47:05 +00001804 netlink_overrun(sk);
Patrick McHardyf9c22882013-04-17 06:47:04 +00001805err1:
1806 sock_put(sk);
1807 return NULL;
1808
1809out_free:
1810 kfree_skb(skb);
1811 spin_unlock_bh(&sk->sk_receive_queue.lock);
1812out_put:
1813 sock_put(sk);
1814out:
1815#endif
1816 return alloc_skb(size, gfp_mask);
1817}
1818EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1819
Patrick McHardy4277a082006-03-20 18:52:01 -08001820int netlink_has_listeners(struct sock *sk, unsigned int group)
1821{
1822 int res = 0;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001823 struct listeners *listeners;
Patrick McHardy4277a082006-03-20 18:52:01 -08001824
Denis V. Lunevaed81562007-10-10 21:14:32 -07001825 BUG_ON(!netlink_is_kernel(sk));
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001826
1827 rcu_read_lock();
1828 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1829
Eric Dumazet6d772ac2012-10-18 03:21:55 +00001830 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001831 res = test_bit(group - 1, listeners->masks);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001832
1833 rcu_read_unlock();
1834
Patrick McHardy4277a082006-03-20 18:52:01 -08001835 return res;
1836}
1837EXPORT_SYMBOL_GPL(netlink_has_listeners);
1838
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001839static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840{
1841 struct netlink_sock *nlk = nlk_sk(sk);
1842
1843 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
Patrick McHardycd967e02013-04-17 06:46:56 +00001844 !test_bit(NETLINK_CONGESTED, &nlk->state)) {
Patrick McHardycf0a0182013-04-17 06:47:00 +00001845 netlink_skb_set_owner_r(skb, sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001846 __netlink_sendskb(sk, skb);
stephen hemminger2c6458002011-12-22 08:52:03 +00001847 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 }
1849 return -1;
1850}
1851
1852struct netlink_broadcast_data {
1853 struct sock *exclude_sk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001854 struct net *net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001855 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 u32 group;
1857 int failure;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08001858 int delivery_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 int congested;
1860 int delivered;
Al Viro7d877f32005-10-21 03:20:43 -04001861 gfp_t allocation;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 struct sk_buff *skb, *skb2;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001863 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1864 void *tx_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865};
1866
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001867static int do_one_broadcast(struct sock *sk,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 struct netlink_broadcast_data *p)
1869{
1870 struct netlink_sock *nlk = nlk_sk(sk);
1871 int val;
1872
1873 if (p->exclude_sk == sk)
1874 goto out;
1875
Eric W. Biederman15e47302012-09-07 20:12:54 +00001876 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001877 !test_bit(p->group - 1, nlk->groups))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 goto out;
1879
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09001880 if (!net_eq(sock_net(sk), p->net))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001881 goto out;
1882
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 if (p->failure) {
1884 netlink_overrun(sk);
1885 goto out;
1886 }
1887
1888 sock_hold(sk);
1889 if (p->skb2 == NULL) {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001890 if (skb_shared(p->skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 p->skb2 = skb_clone(p->skb, p->allocation);
1892 } else {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001893 p->skb2 = skb_get(p->skb);
1894 /*
1895 * skb ownership may have been set when
1896 * delivered to a previous socket.
1897 */
1898 skb_orphan(p->skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 }
1900 }
1901 if (p->skb2 == NULL) {
1902 netlink_overrun(sk);
1903 /* Clone failed. Notify ALL listeners. */
1904 p->failure = 1;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00001905 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1906 p->delivery_failure = 1;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001907 } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1908 kfree_skb(p->skb2);
1909 p->skb2 = NULL;
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001910 } else if (sk_filter(sk, p->skb2)) {
1911 kfree_skb(p->skb2);
1912 p->skb2 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1914 netlink_overrun(sk);
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00001915 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1916 p->delivery_failure = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 } else {
1918 p->congested |= val;
1919 p->delivered = 1;
1920 p->skb2 = NULL;
1921 }
1922 sock_put(sk);
1923
1924out:
1925 return 0;
1926}
1927
Eric W. Biederman15e47302012-09-07 20:12:54 +00001928int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001929 u32 group, gfp_t allocation,
1930 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1931 void *filter_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001933 struct net *net = sock_net(ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 struct netlink_broadcast_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 struct sock *sk;
1936
1937 skb = netlink_trim(skb, allocation);
1938
1939 info.exclude_sk = ssk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001940 info.net = net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001941 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 info.group = group;
1943 info.failure = 0;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08001944 info.delivery_failure = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 info.congested = 0;
1946 info.delivered = 0;
1947 info.allocation = allocation;
1948 info.skb = skb;
1949 info.skb2 = NULL;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001950 info.tx_filter = filter;
1951 info.tx_data = filter_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952
1953 /* While we sleep in clone, do not allow to change socket list */
1954
1955 netlink_lock_table();
1956
Sasha Levinb67bfe02013-02-27 17:06:00 -08001957 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 do_one_broadcast(sk, &info);
1959
Neil Horman70d4bf62010-07-20 06:45:56 +00001960 consume_skb(skb);
Tommy S. Christensenaa1c6a62005-05-19 13:07:32 -07001961
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 netlink_unlock_table();
1963
Neil Horman70d4bf62010-07-20 06:45:56 +00001964 if (info.delivery_failure) {
1965 kfree_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08001966 return -ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00001967 }
1968 consume_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08001969
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 if (info.delivered) {
1971 if (info.congested && (allocation & __GFP_WAIT))
1972 yield();
1973 return 0;
1974 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 return -ESRCH;
1976}
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001977EXPORT_SYMBOL(netlink_broadcast_filtered);
1978
Eric W. Biederman15e47302012-09-07 20:12:54 +00001979int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001980 u32 group, gfp_t allocation)
1981{
Eric W. Biederman15e47302012-09-07 20:12:54 +00001982 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001983 NULL, NULL);
1984}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001985EXPORT_SYMBOL(netlink_broadcast);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
1987struct netlink_set_err_data {
1988 struct sock *exclude_sk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001989 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 u32 group;
1991 int code;
1992};
1993
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001994static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995{
1996 struct netlink_sock *nlk = nlk_sk(sk);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00001997 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998
1999 if (sk == p->exclude_sk)
2000 goto out;
2001
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08002002 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002003 goto out;
2004
Eric W. Biederman15e47302012-09-07 20:12:54 +00002005 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07002006 !test_bit(p->group - 1, nlk->groups))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 goto out;
2008
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002009 if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
2010 ret = 1;
2011 goto out;
2012 }
2013
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 sk->sk_err = p->code;
2015 sk->sk_error_report(sk);
2016out:
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002017 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018}
2019
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002020/**
2021 * netlink_set_err - report error to broadcast listeners
2022 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
Eric W. Biederman15e47302012-09-07 20:12:54 +00002023 * @portid: the PORTID of a process that we want to skip (if any)
Johannes Berg840e93f22013-11-19 10:35:40 +01002024 * @group: the broadcast group that will notice the error
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002025 * @code: error code, must be negative (as usual in kernelspace)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002026 *
2027 * This function returns the number of broadcast listeners that have set the
2028 * NETLINK_RECV_NO_ENOBUFS socket option.
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002029 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002030int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031{
2032 struct netlink_set_err_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 struct sock *sk;
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002034 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035
2036 info.exclude_sk = ssk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002037 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 info.group = group;
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002039 /* sk->sk_err wants a positive error value */
2040 info.code = -code;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
2042 read_lock(&nl_table_lock);
2043
Sasha Levinb67bfe02013-02-27 17:06:00 -08002044 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002045 ret += do_one_set_err(sk, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
2047 read_unlock(&nl_table_lock);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002048 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049}
Pablo Neira Ayusodd5b6ce2009-03-23 13:21:06 +01002050EXPORT_SYMBOL(netlink_set_err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051
Johannes Berg84659eb2007-07-18 15:47:05 -07002052/* must be called with netlink table grabbed */
2053static void netlink_update_socket_mc(struct netlink_sock *nlk,
2054 unsigned int group,
2055 int is_new)
2056{
2057 int old, new = !!is_new, subscriptions;
2058
2059 old = test_bit(group - 1, nlk->groups);
2060 subscriptions = nlk->subscriptions - old + new;
2061 if (new)
2062 __set_bit(group - 1, nlk->groups);
2063 else
2064 __clear_bit(group - 1, nlk->groups);
2065 netlink_update_subscriptions(&nlk->sk, subscriptions);
2066 netlink_update_listeners(&nlk->sk);
2067}
2068
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002069static int netlink_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002070 char __user *optval, unsigned int optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002071{
2072 struct sock *sk = sock->sk;
2073 struct netlink_sock *nlk = nlk_sk(sk);
Johannes Bergeb496532007-07-18 02:07:51 -07002074 unsigned int val = 0;
2075 int err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002076
2077 if (level != SOL_NETLINK)
2078 return -ENOPROTOOPT;
2079
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002080 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2081 optlen >= sizeof(int) &&
Johannes Bergeb496532007-07-18 02:07:51 -07002082 get_user(val, (unsigned int __user *)optval))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002083 return -EFAULT;
2084
2085 switch (optname) {
2086 case NETLINK_PKTINFO:
2087 if (val)
2088 nlk->flags |= NETLINK_RECV_PKTINFO;
2089 else
2090 nlk->flags &= ~NETLINK_RECV_PKTINFO;
2091 err = 0;
2092 break;
2093 case NETLINK_ADD_MEMBERSHIP:
2094 case NETLINK_DROP_MEMBERSHIP: {
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002095 if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002096 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002097 err = netlink_realloc_groups(sk);
2098 if (err)
2099 return err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002100 if (!val || val - 1 >= nlk->ngroups)
2101 return -EINVAL;
2102 netlink_table_grab();
Johannes Berg84659eb2007-07-18 15:47:05 -07002103 netlink_update_socket_mc(nlk, val,
2104 optname == NETLINK_ADD_MEMBERSHIP);
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002105 netlink_table_ungrab();
Pablo Neira Ayuso03292742012-06-29 06:15:22 +00002106
2107 if (nlk->netlink_bind)
2108 nlk->netlink_bind(val);
2109
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002110 err = 0;
2111 break;
2112 }
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002113 case NETLINK_BROADCAST_ERROR:
2114 if (val)
2115 nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
2116 else
2117 nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
2118 err = 0;
2119 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002120 case NETLINK_NO_ENOBUFS:
2121 if (val) {
2122 nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
Patrick McHardycd967e02013-04-17 06:46:56 +00002123 clear_bit(NETLINK_CONGESTED, &nlk->state);
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002124 wake_up_interruptible(&nlk->wait);
Eric Dumazet658cb352012-04-22 21:30:21 +00002125 } else {
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002126 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002127 }
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002128 err = 0;
2129 break;
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002130#ifdef CONFIG_NETLINK_MMAP
2131 case NETLINK_RX_RING:
2132 case NETLINK_TX_RING: {
2133 struct nl_mmap_req req;
2134
2135 /* Rings might consume more memory than queue limits, require
2136 * CAP_NET_ADMIN.
2137 */
2138 if (!capable(CAP_NET_ADMIN))
2139 return -EPERM;
2140 if (optlen < sizeof(req))
2141 return -EINVAL;
2142 if (copy_from_user(&req, optval, sizeof(req)))
2143 return -EFAULT;
2144 err = netlink_set_ring(sk, &req, false,
2145 optname == NETLINK_TX_RING);
2146 break;
2147 }
2148#endif /* CONFIG_NETLINK_MMAP */
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002149 default:
2150 err = -ENOPROTOOPT;
2151 }
2152 return err;
2153}
2154
2155static int netlink_getsockopt(struct socket *sock, int level, int optname,
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002156 char __user *optval, int __user *optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002157{
2158 struct sock *sk = sock->sk;
2159 struct netlink_sock *nlk = nlk_sk(sk);
2160 int len, val, err;
2161
2162 if (level != SOL_NETLINK)
2163 return -ENOPROTOOPT;
2164
2165 if (get_user(len, optlen))
2166 return -EFAULT;
2167 if (len < 0)
2168 return -EINVAL;
2169
2170 switch (optname) {
2171 case NETLINK_PKTINFO:
2172 if (len < sizeof(int))
2173 return -EINVAL;
2174 len = sizeof(int);
2175 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
Heiko Carstensa27b58f2006-10-30 15:06:12 -08002176 if (put_user(len, optlen) ||
2177 put_user(val, optval))
2178 return -EFAULT;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002179 err = 0;
2180 break;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002181 case NETLINK_BROADCAST_ERROR:
2182 if (len < sizeof(int))
2183 return -EINVAL;
2184 len = sizeof(int);
2185 val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
2186 if (put_user(len, optlen) ||
2187 put_user(val, optval))
2188 return -EFAULT;
2189 err = 0;
2190 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002191 case NETLINK_NO_ENOBUFS:
2192 if (len < sizeof(int))
2193 return -EINVAL;
2194 len = sizeof(int);
2195 val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
2196 if (put_user(len, optlen) ||
2197 put_user(val, optval))
2198 return -EFAULT;
2199 err = 0;
2200 break;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002201 default:
2202 err = -ENOPROTOOPT;
2203 }
2204 return err;
2205}
2206
2207static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2208{
2209 struct nl_pktinfo info;
2210
2211 info.group = NETLINK_CB(skb).dst_group;
2212 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2213}
2214
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
2216 struct msghdr *msg, size_t len)
2217{
2218 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2219 struct sock *sk = sock->sk;
2220 struct netlink_sock *nlk = nlk_sk(sk);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002221 struct sockaddr_nl *addr = msg->msg_name;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002222 u32 dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002223 u32 dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 struct sk_buff *skb;
2225 int err;
2226 struct scm_cookie scm;
2227
2228 if (msg->msg_flags&MSG_OOB)
2229 return -EOPNOTSUPP;
2230
Eric Dumazet16e57262011-09-19 05:52:27 +00002231 if (NULL == siocb->scm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 siocb->scm = &scm;
Eric Dumazet16e57262011-09-19 05:52:27 +00002233
Eric Dumazete0e3cea2012-08-21 06:21:17 +00002234 err = scm_send(sock, msg, siocb->scm, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 if (err < 0)
2236 return err;
2237
2238 if (msg->msg_namelen) {
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002239 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 if (addr->nl_family != AF_NETLINK)
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002241 goto out;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002242 dst_portid = addr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002243 dst_group = ffs(addr->nl_groups);
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002244 err = -EPERM;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002245 if ((dst_group || dst_portid) &&
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002246 !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002247 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002249 dst_portid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002250 dst_group = nlk->dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 }
2252
Eric W. Biederman15e47302012-09-07 20:12:54 +00002253 if (!nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 err = netlink_autobind(sock);
2255 if (err)
2256 goto out;
2257 }
2258
Patrick McHardy5fd96122013-04-17 06:47:03 +00002259 if (netlink_tx_is_mmaped(sk) &&
2260 msg->msg_iov->iov_base == NULL) {
2261 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
2262 siocb);
2263 goto out;
2264 }
2265
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 err = -EMSGSIZE;
2267 if (len > sk->sk_sndbuf - 32)
2268 goto out;
2269 err = -ENOBUFS;
Pablo Neira3a365152013-06-28 03:04:23 +02002270 skb = netlink_alloc_large_skb(len, dst_group);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002271 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 goto out;
2273
Eric W. Biederman15e47302012-09-07 20:12:54 +00002274 NETLINK_CB(skb).portid = nlk->portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002275 NETLINK_CB(skb).dst_group = dst_group;
Eric W. Biedermandbe9a412012-09-06 18:20:01 +00002276 NETLINK_CB(skb).creds = siocb->scm->creds;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 err = -EFAULT;
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002279 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 kfree_skb(skb);
2281 goto out;
2282 }
2283
2284 err = security_netlink_send(sk, skb);
2285 if (err) {
2286 kfree_skb(skb);
2287 goto out;
2288 }
2289
Patrick McHardyd629b832005-08-14 19:27:50 -07002290 if (dst_group) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002292 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 }
Eric W. Biederman15e47302012-09-07 20:12:54 +00002294 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295
2296out:
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002297 scm_destroy(siocb->scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 return err;
2299}
2300
2301static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2302 struct msghdr *msg, size_t len,
2303 int flags)
2304{
2305 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2306 struct scm_cookie scm;
2307 struct sock *sk = sock->sk;
2308 struct netlink_sock *nlk = nlk_sk(sk);
2309 int noblock = flags&MSG_DONTWAIT;
2310 size_t copied;
Johannes Berg68d6ac62010-08-15 21:20:44 +00002311 struct sk_buff *skb, *data_skb;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002312 int err, ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
2314 if (flags&MSG_OOB)
2315 return -EOPNOTSUPP;
2316
2317 copied = 0;
2318
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002319 skb = skb_recv_datagram(sk, flags, noblock, &err);
2320 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 goto out;
2322
Johannes Berg68d6ac62010-08-15 21:20:44 +00002323 data_skb = skb;
2324
Johannes Berg1dacc762009-07-01 11:26:02 +00002325#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2326 if (unlikely(skb_shinfo(skb)->frag_list)) {
Johannes Berg1dacc762009-07-01 11:26:02 +00002327 /*
Johannes Berg68d6ac62010-08-15 21:20:44 +00002328 * If this skb has a frag_list, then here that means that we
2329 * will have to use the frag_list skb's data for compat tasks
2330 * and the regular skb's data for normal (non-compat) tasks.
Johannes Berg1dacc762009-07-01 11:26:02 +00002331 *
Johannes Berg68d6ac62010-08-15 21:20:44 +00002332 * If we need to send the compat skb, assign it to the
2333 * 'data_skb' variable so that it will be used below for data
2334 * copying. We keep 'skb' for everything else, including
2335 * freeing both later.
Johannes Berg1dacc762009-07-01 11:26:02 +00002336 */
Johannes Berg68d6ac62010-08-15 21:20:44 +00002337 if (flags & MSG_CMSG_COMPAT)
2338 data_skb = skb_shinfo(skb)->frag_list;
Johannes Berg1dacc762009-07-01 11:26:02 +00002339 }
2340#endif
2341
Johannes Berg68d6ac62010-08-15 21:20:44 +00002342 copied = data_skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 if (len < copied) {
2344 msg->msg_flags |= MSG_TRUNC;
2345 copied = len;
2346 }
2347
Johannes Berg68d6ac62010-08-15 21:20:44 +00002348 skb_reset_transport_header(data_skb);
2349 err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350
2351 if (msg->msg_name) {
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002352 struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353 addr->nl_family = AF_NETLINK;
2354 addr->nl_pad = 0;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002355 addr->nl_pid = NETLINK_CB(skb).portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002356 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 msg->msg_namelen = sizeof(*addr);
2358 }
2359
Patrick McHardycc9a06c2006-03-12 20:34:27 -08002360 if (nlk->flags & NETLINK_RECV_PKTINFO)
2361 netlink_cmsg_recv_pktinfo(msg, skb);
2362
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363 if (NULL == siocb->scm) {
2364 memset(&scm, 0, sizeof(scm));
2365 siocb->scm = &scm;
2366 }
2367 siocb->scm->creds = *NETLINK_CREDS(skb);
Patrick McHardy188ccb52007-05-03 03:27:01 -07002368 if (flags & MSG_TRUNC)
Johannes Berg68d6ac62010-08-15 21:20:44 +00002369 copied = data_skb->len;
David S. Millerdaa37662010-08-15 23:21:50 -07002370
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 skb_free_datagram(sk, skb);
2372
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002373 if (nlk->cb_running &&
2374 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
Andrey Vaginb44d2112011-02-21 02:40:47 +00002375 ret = netlink_dump(sk);
2376 if (ret) {
2377 sk->sk_err = ret;
2378 sk->sk_error_report(sk);
2379 }
2380 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381
2382 scm_recv(sock, msg, siocb->scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383out:
2384 netlink_rcv_wake(sk);
2385 return err ? : copied;
2386}
2387
2388static void netlink_data_ready(struct sock *sk, int len)
2389{
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002390 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391}
2392
2393/*
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002394 * We export these functions to other modules. They provide a
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 * complete set of kernel non-blocking support for message
2396 * queueing.
2397 */
2398
2399struct sock *
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002400__netlink_kernel_create(struct net *net, int unit, struct module *module,
2401 struct netlink_kernel_cfg *cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402{
2403 struct socket *sock;
2404 struct sock *sk;
Patrick McHardy77247bb2005-08-14 19:27:13 -07002405 struct netlink_sock *nlk;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002406 struct listeners *listeners = NULL;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002407 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2408 unsigned int groups;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409
Akinobu Mitafab2caf2006-08-29 02:15:24 -07002410 BUG_ON(!nl_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002412 if (unit < 0 || unit >= MAX_LINKS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 return NULL;
2414
2415 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2416 return NULL;
2417
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002418 /*
2419 * We have to just have a reference on the net from sk, but don't
2420 * get_net it. Besides, we cannot get and then put the net here.
2421 * So we create one inside init_net and the move it to net.
2422 */
2423
2424 if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2425 goto out_sock_release_nosk;
2426
2427 sk = sock->sk;
Denis V. Lunevedf02082008-02-29 11:18:32 -08002428 sk_change_net(sk, net);
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002429
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002430 if (!cfg || cfg->groups < 32)
Patrick McHardy4277a082006-03-20 18:52:01 -08002431 groups = 32;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002432 else
2433 groups = cfg->groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08002434
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002435 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
Patrick McHardy4277a082006-03-20 18:52:01 -08002436 if (!listeners)
2437 goto out_sock_release;
2438
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 sk->sk_data_ready = netlink_data_ready;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002440 if (cfg && cfg->input)
2441 nlk_sk(sk)->netlink_rcv = cfg->input;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002443 if (netlink_insert(sk, net, 0))
Patrick McHardy77247bb2005-08-14 19:27:13 -07002444 goto out_sock_release;
2445
2446 nlk = nlk_sk(sk);
2447 nlk->flags |= NETLINK_KERNEL_SOCKET;
2448
2449 netlink_table_grab();
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002450 if (!nl_table[unit].registered) {
2451 nl_table[unit].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002452 rcu_assign_pointer(nl_table[unit].listeners, listeners);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002453 nl_table[unit].cb_mutex = cb_mutex;
2454 nl_table[unit].module = module;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002455 if (cfg) {
2456 nl_table[unit].bind = cfg->bind;
2457 nl_table[unit].flags = cfg->flags;
Gao fengda12c902013-06-06 14:49:11 +08002458 if (cfg->compare)
2459 nl_table[unit].compare = cfg->compare;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002460 }
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002461 nl_table[unit].registered = 1;
Jesper Juhlf937f1f462007-10-15 01:39:12 -07002462 } else {
2463 kfree(listeners);
Denis V. Lunev869e58f2008-01-18 23:53:31 -08002464 nl_table[unit].registered++;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002465 }
Patrick McHardy77247bb2005-08-14 19:27:13 -07002466 netlink_table_ungrab();
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002467 return sk;
2468
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002469out_sock_release:
Patrick McHardy4277a082006-03-20 18:52:01 -08002470 kfree(listeners);
Denis V. Lunev9dfbec12008-02-29 11:17:56 -08002471 netlink_kernel_release(sk);
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002472 return NULL;
2473
2474out_sock_release_nosk:
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002475 sock_release(sock);
Patrick McHardy77247bb2005-08-14 19:27:13 -07002476 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477}
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002478EXPORT_SYMBOL(__netlink_kernel_create);
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002479
2480void
2481netlink_kernel_release(struct sock *sk)
2482{
Denis V. Lunevedf02082008-02-29 11:18:32 -08002483 sk_release_kernel(sk);
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002484}
2485EXPORT_SYMBOL(netlink_kernel_release);
2486
Johannes Bergd136f1b2009-09-12 03:03:15 +00002487int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002488{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002489 struct listeners *new, *old;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002490 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002491
2492 if (groups < 32)
2493 groups = 32;
2494
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002495 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002496 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2497 if (!new)
Johannes Bergd136f1b2009-09-12 03:03:15 +00002498 return -ENOMEM;
Eric Dumazet6d772ac2012-10-18 03:21:55 +00002499 old = nl_deref_protected(tbl->listeners);
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002500 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2501 rcu_assign_pointer(tbl->listeners, new);
2502
Lai Jiangshan37b6b932011-03-15 18:01:42 +08002503 kfree_rcu(old, rcu);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002504 }
2505 tbl->groups = groups;
2506
Johannes Bergd136f1b2009-09-12 03:03:15 +00002507 return 0;
2508}
2509
2510/**
2511 * netlink_change_ngroups - change number of multicast groups
2512 *
2513 * This changes the number of multicast groups that are available
2514 * on a certain netlink family. Note that it is not possible to
2515 * change the number of groups to below 32. Also note that it does
2516 * not implicitly call netlink_clear_multicast_users() when the
2517 * number of groups is reduced.
2518 *
2519 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2520 * @groups: The new number of groups.
2521 */
2522int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2523{
2524 int err;
2525
2526 netlink_table_grab();
2527 err = __netlink_change_ngroups(sk, groups);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002528 netlink_table_ungrab();
Johannes Bergd136f1b2009-09-12 03:03:15 +00002529
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002530 return err;
2531}
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002532
Johannes Bergb8273572009-09-24 15:44:05 -07002533void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2534{
2535 struct sock *sk;
Johannes Bergb8273572009-09-24 15:44:05 -07002536 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2537
Sasha Levinb67bfe02013-02-27 17:06:00 -08002538 sk_for_each_bound(sk, &tbl->mc_list)
Johannes Bergb8273572009-09-24 15:44:05 -07002539 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2540}
2541
Johannes Berg84659eb2007-07-18 15:47:05 -07002542/**
2543 * netlink_clear_multicast_users - kick off multicast listeners
2544 *
2545 * This function removes all listeners from the given group.
2546 * @ksk: The kernel netlink socket, as returned by
2547 * netlink_kernel_create().
2548 * @group: The multicast group to clear.
2549 */
2550void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2551{
Johannes Berg84659eb2007-07-18 15:47:05 -07002552 netlink_table_grab();
Johannes Bergb8273572009-09-24 15:44:05 -07002553 __netlink_clear_multicast_users(ksk, group);
Johannes Berg84659eb2007-07-18 15:47:05 -07002554 netlink_table_ungrab();
2555}
Johannes Berg84659eb2007-07-18 15:47:05 -07002556
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002557struct nlmsghdr *
Eric W. Biederman15e47302012-09-07 20:12:54 +00002558__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002559{
2560 struct nlmsghdr *nlh;
Hong zhi guo573ce262013-03-27 06:47:04 +00002561 int size = nlmsg_msg_size(len);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002562
2563 nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
2564 nlh->nlmsg_type = type;
2565 nlh->nlmsg_len = size;
2566 nlh->nlmsg_flags = flags;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002567 nlh->nlmsg_pid = portid;
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002568 nlh->nlmsg_seq = seq;
2569 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
Hong zhi guo573ce262013-03-27 06:47:04 +00002570 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002571 return nlh;
2572}
2573EXPORT_SYMBOL(__nlmsg_put);
2574
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575/*
2576 * It looks a bit ugly.
2577 * It would be better to create kernel thread.
2578 */
2579
2580static int netlink_dump(struct sock *sk)
2581{
2582 struct netlink_sock *nlk = nlk_sk(sk);
2583 struct netlink_callback *cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002584 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 struct nlmsghdr *nlh;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002586 int len, err = -ENOBUFS;
Greg Rosec7ac8672011-06-10 01:27:09 +00002587 int alloc_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002589 mutex_lock(nlk->cb_mutex);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002590 if (!nlk->cb_running) {
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002591 err = -EINVAL;
2592 goto errout_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 }
2594
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002595 cb = &nlk->cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002596 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2597
Patrick McHardyf9c22882013-04-17 06:47:04 +00002598 if (!netlink_rx_is_mmaped(sk) &&
2599 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2600 goto errout_skb;
2601 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
Greg Rosec7ac8672011-06-10 01:27:09 +00002602 if (!skb)
Dan Carpenterc63d6ea2011-06-15 03:11:42 +00002603 goto errout_skb;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002604 netlink_skb_set_owner_r(skb, sk);
Greg Rosec7ac8672011-06-10 01:27:09 +00002605
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606 len = cb->dump(skb, cb);
2607
2608 if (len > 0) {
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002609 mutex_unlock(nlk->cb_mutex);
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002610
2611 if (sk_filter(sk, skb))
2612 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002613 else
2614 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615 return 0;
2616 }
2617
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002618 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2619 if (!nlh)
2620 goto errout_skb;
2621
Johannes Berg670dc282011-06-20 13:40:46 +02002622 nl_dump_check_consistent(cb, nlh);
2623
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002624 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2625
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002626 if (sk_filter(sk, skb))
2627 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002628 else
2629 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630
Thomas Grafa8f74b22005-11-10 02:25:52 +01002631 if (cb->done)
2632 cb->done(cb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002634 nlk->cb_running = false;
2635 mutex_unlock(nlk->cb_mutex);
Gao feng6dc878a2012-10-04 20:15:48 +00002636 module_put(cb->module);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002637 consume_skb(cb->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 return 0;
Thomas Graf17977542005-06-18 22:53:48 -07002639
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002640errout_skb:
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002641 mutex_unlock(nlk->cb_mutex);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002642 kfree_skb(skb);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002643 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644}
2645
Gao feng6dc878a2012-10-04 20:15:48 +00002646int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2647 const struct nlmsghdr *nlh,
2648 struct netlink_dump_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649{
2650 struct netlink_callback *cb;
2651 struct sock *sk;
2652 struct netlink_sock *nlk;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002653 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654
Patrick McHardyf9c22882013-04-17 06:47:04 +00002655 /* Memory mapped dump requests need to be copied to avoid looping
2656 * on the pending state in netlink_mmap_sendmsg() while the CB hold
2657 * a reference to the skb.
2658 */
2659 if (netlink_skb_is_mmaped(skb)) {
2660 skb = skb_copy(skb, GFP_KERNEL);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002661 if (skb == NULL)
Patrick McHardyf9c22882013-04-17 06:47:04 +00002662 return -ENOBUFS;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002663 } else
2664 atomic_inc(&skb->users);
2665
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002666 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2667 if (sk == NULL) {
2668 ret = -ECONNREFUSED;
2669 goto error_free;
2670 }
2671
2672 nlk = nlk_sk(sk);
2673 mutex_lock(nlk->cb_mutex);
2674 /* A dump is in progress... */
2675 if (nlk->cb_running) {
2676 ret = -EBUSY;
2677 goto error_unlock;
2678 }
2679 /* add reference of module which cb->dump belongs to */
2680 if (!try_module_get(control->module)) {
2681 ret = -EPROTONOSUPPORT;
2682 goto error_unlock;
2683 }
2684
2685 cb = &nlk->cb;
2686 memset(cb, 0, sizeof(*cb));
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002687 cb->dump = control->dump;
2688 cb->done = control->done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 cb->nlh = nlh;
Pablo Neira Ayuso7175c882012-02-24 14:30:16 +00002690 cb->data = control->data;
Gao feng6dc878a2012-10-04 20:15:48 +00002691 cb->module = control->module;
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002692 cb->min_dump_alloc = control->min_dump_alloc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693 cb->skb = skb;
2694
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002695 nlk->cb_running = true;
Gao feng6dc878a2012-10-04 20:15:48 +00002696
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002697 mutex_unlock(nlk->cb_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698
Andrey Vaginb44d2112011-02-21 02:40:47 +00002699 ret = netlink_dump(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700 sock_put(sk);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002701
Andrey Vaginb44d2112011-02-21 02:40:47 +00002702 if (ret)
2703 return ret;
2704
Denis V. Lunev5c582982007-10-23 20:29:25 -07002705 /* We successfully started a dump, by returning -EINTR we
2706 * signal not to send ACK even if it was requested.
2707 */
2708 return -EINTR;
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002709
2710error_unlock:
2711 sock_put(sk);
2712 mutex_unlock(nlk->cb_mutex);
2713error_free:
2714 kfree_skb(skb);
2715 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716}
Gao feng6dc878a2012-10-04 20:15:48 +00002717EXPORT_SYMBOL(__netlink_dump_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718
2719void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2720{
2721 struct sk_buff *skb;
2722 struct nlmsghdr *rep;
2723 struct nlmsgerr *errmsg;
Thomas Graf339bf982006-11-10 14:10:15 -08002724 size_t payload = sizeof(*errmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725
Thomas Graf339bf982006-11-10 14:10:15 -08002726 /* error messages get the original request appened */
2727 if (err)
2728 payload += nlmsg_len(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729
Patrick McHardyf9c22882013-04-17 06:47:04 +00002730 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2731 NETLINK_CB(in_skb).portid, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732 if (!skb) {
2733 struct sock *sk;
2734
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002735 sk = netlink_lookup(sock_net(in_skb->sk),
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002736 in_skb->sk->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002737 NETLINK_CB(in_skb).portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 if (sk) {
2739 sk->sk_err = ENOBUFS;
2740 sk->sk_error_report(sk);
2741 sock_put(sk);
2742 }
2743 return;
2744 }
2745
Eric W. Biederman15e47302012-09-07 20:12:54 +00002746 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
John Fastabend5dba93a2009-09-25 13:11:44 +00002747 NLMSG_ERROR, payload, 0);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002748 errmsg = nlmsg_data(rep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 errmsg->error = err;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002750 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
Eric W. Biederman15e47302012-09-07 20:12:54 +00002751 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002753EXPORT_SYMBOL(netlink_ack);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002755int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002756 struct nlmsghdr *))
Thomas Graf82ace472005-11-10 02:25:53 +01002757{
Thomas Graf82ace472005-11-10 02:25:53 +01002758 struct nlmsghdr *nlh;
2759 int err;
2760
2761 while (skb->len >= nlmsg_total_size(0)) {
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002762 int msglen;
2763
Arnaldo Carvalho de Melob529ccf2007-04-25 19:08:35 -07002764 nlh = nlmsg_hdr(skb);
Thomas Grafd35b6852007-03-22 23:28:46 -07002765 err = 0;
Thomas Graf82ace472005-11-10 02:25:53 +01002766
Martin Murrayad8e4b72006-01-10 13:02:29 -08002767 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
Thomas Graf82ace472005-11-10 02:25:53 +01002768 return 0;
2769
Thomas Grafd35b6852007-03-22 23:28:46 -07002770 /* Only requests are handled by the kernel */
2771 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
Denis V. Lunev5c582982007-10-23 20:29:25 -07002772 goto ack;
Thomas Grafd35b6852007-03-22 23:28:46 -07002773
Thomas Graf45e7ae72007-03-22 23:29:10 -07002774 /* Skip control messages */
2775 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
Denis V. Lunev5c582982007-10-23 20:29:25 -07002776 goto ack;
Thomas Graf45e7ae72007-03-22 23:29:10 -07002777
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002778 err = cb(skb, nlh);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002779 if (err == -EINTR)
2780 goto skip;
2781
2782ack:
Thomas Grafd35b6852007-03-22 23:28:46 -07002783 if (nlh->nlmsg_flags & NLM_F_ACK || err)
Thomas Graf82ace472005-11-10 02:25:53 +01002784 netlink_ack(skb, nlh, err);
Thomas Graf82ace472005-11-10 02:25:53 +01002785
Denis V. Lunev5c582982007-10-23 20:29:25 -07002786skip:
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002787 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002788 if (msglen > skb->len)
2789 msglen = skb->len;
2790 skb_pull(skb, msglen);
Thomas Graf82ace472005-11-10 02:25:53 +01002791 }
2792
2793 return 0;
2794}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002795EXPORT_SYMBOL(netlink_rcv_skb);
Thomas Graf82ace472005-11-10 02:25:53 +01002796
2797/**
Thomas Grafd387f6a2006-08-15 00:31:06 -07002798 * nlmsg_notify - send a notification netlink message
2799 * @sk: netlink socket to use
2800 * @skb: notification message
Eric W. Biederman15e47302012-09-07 20:12:54 +00002801 * @portid: destination netlink portid for reports or 0
Thomas Grafd387f6a2006-08-15 00:31:06 -07002802 * @group: destination multicast group or 0
2803 * @report: 1 to report back, 0 to disable
2804 * @flags: allocation flags
2805 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002806int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
Thomas Grafd387f6a2006-08-15 00:31:06 -07002807 unsigned int group, int report, gfp_t flags)
2808{
2809 int err = 0;
2810
2811 if (group) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002812 int exclude_portid = 0;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002813
2814 if (report) {
2815 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002816 exclude_portid = portid;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002817 }
2818
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002819 /* errors reported via destination sk->sk_err, but propagate
2820 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002821 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
Thomas Grafd387f6a2006-08-15 00:31:06 -07002822 }
2823
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002824 if (report) {
2825 int err2;
2826
Eric W. Biederman15e47302012-09-07 20:12:54 +00002827 err2 = nlmsg_unicast(sk, skb, portid);
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002828 if (!err || err == -ESRCH)
2829 err = err2;
2830 }
Thomas Grafd387f6a2006-08-15 00:31:06 -07002831
2832 return err;
2833}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002834EXPORT_SYMBOL(nlmsg_notify);
Thomas Grafd387f6a2006-08-15 00:31:06 -07002835
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836#ifdef CONFIG_PROC_FS
2837struct nl_seq_iter {
Denis V. Luneve372c412007-11-19 22:31:54 -08002838 struct seq_net_private p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 int link;
2840 int hash_idx;
2841};
2842
2843static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
2844{
2845 struct nl_seq_iter *iter = seq->private;
2846 int i, j;
2847 struct sock *s;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848 loff_t off = 0;
2849
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002850 for (i = 0; i < MAX_LINKS; i++) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002851 struct nl_portid_hash *hash = &nl_table[i].hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852
2853 for (j = 0; j <= hash->mask; j++) {
Sasha Levinb67bfe02013-02-27 17:06:00 -08002854 sk_for_each(s, &hash->table[j]) {
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002855 if (sock_net(s) != seq_file_net(seq))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002856 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 if (off == pos) {
2858 iter->link = i;
2859 iter->hash_idx = j;
2860 return s;
2861 }
2862 ++off;
2863 }
2864 }
2865 }
2866 return NULL;
2867}
2868
2869static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002870 __acquires(nl_table_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871{
2872 read_lock(&nl_table_lock);
2873 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2874}
2875
2876static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2877{
2878 struct sock *s;
2879 struct nl_seq_iter *iter;
Gao fengda12c902013-06-06 14:49:11 +08002880 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881 int i, j;
2882
2883 ++*pos;
2884
2885 if (v == SEQ_START_TOKEN)
2886 return netlink_seq_socket_idx(seq, 0);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002887
Gao fengda12c902013-06-06 14:49:11 +08002888 net = seq_file_net(seq);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002889 iter = seq->private;
2890 s = v;
2891 do {
2892 s = sk_next(s);
Gao fengda12c902013-06-06 14:49:11 +08002893 } while (s && !nl_table[s->sk_protocol].compare(net, s));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 if (s)
2895 return s;
2896
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897 i = iter->link;
2898 j = iter->hash_idx + 1;
2899
2900 do {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002901 struct nl_portid_hash *hash = &nl_table[i].hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902
2903 for (; j <= hash->mask; j++) {
2904 s = sk_head(&hash->table[j]);
Gao fengda12c902013-06-06 14:49:11 +08002905
2906 while (s && !nl_table[s->sk_protocol].compare(net, s))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002907 s = sk_next(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908 if (s) {
2909 iter->link = i;
2910 iter->hash_idx = j;
2911 return s;
2912 }
2913 }
2914
2915 j = 0;
2916 } while (++i < MAX_LINKS);
2917
2918 return NULL;
2919}
2920
2921static void netlink_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002922 __releases(nl_table_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923{
2924 read_unlock(&nl_table_lock);
2925}
2926
2927
2928static int netlink_seq_show(struct seq_file *seq, void *v)
2929{
Eric Dumazet658cb352012-04-22 21:30:21 +00002930 if (v == SEQ_START_TOKEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 seq_puts(seq,
2932 "sk Eth Pid Groups "
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00002933 "Rmem Wmem Dump Locks Drops Inode\n");
Eric Dumazet658cb352012-04-22 21:30:21 +00002934 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935 struct sock *s = v;
2936 struct netlink_sock *nlk = nlk_sk(s);
2937
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002938 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002939 s,
2940 s->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002941 nlk->portid,
Patrick McHardy513c2502005-09-06 15:43:59 -07002942 nlk->groups ? (u32)nlk->groups[0] : 0,
Eric Dumazet31e6d362009-06-17 19:05:41 -07002943 sk_rmem_alloc_get(s),
2944 sk_wmem_alloc_get(s),
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002945 nlk->cb_running,
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002946 atomic_read(&s->sk_refcnt),
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00002947 atomic_read(&s->sk_drops),
2948 sock_i_ino(s)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949 );
2950
2951 }
2952 return 0;
2953}
2954
Philippe De Muyter56b3d972007-07-10 23:07:31 -07002955static const struct seq_operations netlink_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956 .start = netlink_seq_start,
2957 .next = netlink_seq_next,
2958 .stop = netlink_seq_stop,
2959 .show = netlink_seq_show,
2960};
2961
2962
2963static int netlink_seq_open(struct inode *inode, struct file *file)
2964{
Denis V. Luneve372c412007-11-19 22:31:54 -08002965 return seq_open_net(inode, file, &netlink_seq_ops,
2966 sizeof(struct nl_seq_iter));
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002967}
2968
Arjan van de Venda7071d2007-02-12 00:55:36 -08002969static const struct file_operations netlink_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 .owner = THIS_MODULE,
2971 .open = netlink_seq_open,
2972 .read = seq_read,
2973 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08002974 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975};
2976
2977#endif
2978
2979int netlink_register_notifier(struct notifier_block *nb)
2980{
Alan Sterne041c682006-03-27 01:16:30 -08002981 return atomic_notifier_chain_register(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002983EXPORT_SYMBOL(netlink_register_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984
2985int netlink_unregister_notifier(struct notifier_block *nb)
2986{
Alan Sterne041c682006-03-27 01:16:30 -08002987 return atomic_notifier_chain_unregister(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002989EXPORT_SYMBOL(netlink_unregister_notifier);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002990
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08002991static const struct proto_ops netlink_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992 .family = PF_NETLINK,
2993 .owner = THIS_MODULE,
2994 .release = netlink_release,
2995 .bind = netlink_bind,
2996 .connect = netlink_connect,
2997 .socketpair = sock_no_socketpair,
2998 .accept = sock_no_accept,
2999 .getname = netlink_getname,
Patrick McHardy9652e932013-04-17 06:47:02 +00003000 .poll = netlink_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001 .ioctl = sock_no_ioctl,
3002 .listen = sock_no_listen,
3003 .shutdown = sock_no_shutdown,
Patrick McHardy9a4595b2005-08-15 12:32:15 -07003004 .setsockopt = netlink_setsockopt,
3005 .getsockopt = netlink_getsockopt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 .sendmsg = netlink_sendmsg,
3007 .recvmsg = netlink_recvmsg,
Patrick McHardyccdfcc32013-04-17 06:47:01 +00003008 .mmap = netlink_mmap,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 .sendpage = sock_no_sendpage,
3010};
3011
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003012static const struct net_proto_family netlink_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013 .family = PF_NETLINK,
3014 .create = netlink_create,
3015 .owner = THIS_MODULE, /* for consistency 8) */
3016};
3017
Pavel Emelyanov46650792007-10-08 20:38:39 -07003018static int __net_init netlink_net_init(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003019{
3020#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00003021 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003022 return -ENOMEM;
3023#endif
3024 return 0;
3025}
3026
Pavel Emelyanov46650792007-10-08 20:38:39 -07003027static void __net_exit netlink_net_exit(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003028{
3029#ifdef CONFIG_PROC_FS
Gao fengece31ff2013-02-18 01:34:56 +00003030 remove_proc_entry("netlink", net->proc_net);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003031#endif
3032}
3033
David S. Millerb963ea82010-08-30 19:08:01 -07003034static void __init netlink_add_usersock_entry(void)
3035{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003036 struct listeners *listeners;
David S. Millerb963ea82010-08-30 19:08:01 -07003037 int groups = 32;
3038
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003039 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
David S. Millerb963ea82010-08-30 19:08:01 -07003040 if (!listeners)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003041 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
David S. Millerb963ea82010-08-30 19:08:01 -07003042
3043 netlink_table_grab();
3044
3045 nl_table[NETLINK_USERSOCK].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003046 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
David S. Millerb963ea82010-08-30 19:08:01 -07003047 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3048 nl_table[NETLINK_USERSOCK].registered = 1;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00003049 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
David S. Millerb963ea82010-08-30 19:08:01 -07003050
3051 netlink_table_ungrab();
3052}
3053
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003054static struct pernet_operations __net_initdata netlink_net_ops = {
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003055 .init = netlink_net_init,
3056 .exit = netlink_net_exit,
3057};
3058
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059static int __init netlink_proto_init(void)
3060{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003061 int i;
Denis Cheng26ff5dd2007-09-16 16:36:02 -07003062 unsigned long limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063 unsigned int order;
3064 int err = proto_register(&netlink_proto, 0);
3065
3066 if (err != 0)
3067 goto out;
3068
YOSHIFUJI Hideaki / 吉藤英明fab25742013-01-09 07:19:48 +00003069 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003070
Panagiotis Issaris0da974f2006-07-21 14:51:30 -07003071 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003072 if (!nl_table)
3073 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074
Jan Beulich44813742009-09-21 17:03:05 -07003075 if (totalram_pages >= (128 * 1024))
3076 limit = totalram_pages >> (21 - PAGE_SHIFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003077 else
Jan Beulich44813742009-09-21 17:03:05 -07003078 limit = totalram_pages >> (23 - PAGE_SHIFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079
Denis Cheng26ff5dd2007-09-16 16:36:02 -07003080 order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
3081 limit = (1UL << order) / sizeof(struct hlist_head);
3082 order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083
3084 for (i = 0; i < MAX_LINKS; i++) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00003085 struct nl_portid_hash *hash = &nl_table[i].hash;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086
Eric W. Biederman15e47302012-09-07 20:12:54 +00003087 hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 if (!hash->table) {
3089 while (i-- > 0)
Eric W. Biederman15e47302012-09-07 20:12:54 +00003090 nl_portid_hash_free(nl_table[i].hash.table,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 1 * sizeof(*hash->table));
3092 kfree(nl_table);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003093 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095 hash->max_shift = order;
3096 hash->shift = 0;
3097 hash->mask = 0;
3098 hash->rehash_time = jiffies;
Gao fengca15feb2013-06-13 10:05:38 +08003099
3100 nl_table[i].compare = netlink_compare;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101 }
3102
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02003103 INIT_LIST_HEAD(&netlink_tap_all);
3104
David S. Millerb963ea82010-08-30 19:08:01 -07003105 netlink_add_usersock_entry();
3106
Linus Torvalds1da177e2005-04-16 15:20:36 -07003107 sock_register(&netlink_family_ops);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003108 register_pernet_subsys(&netlink_net_ops);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003109 /* The netlink device handler may be needed early. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110 rtnetlink_init();
3111out:
3112 return err;
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003113panic:
3114 panic("netlink_init: Cannot allocate nl_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003115}
3116
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117core_initcall(netlink_proto_init);