/*
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *                              Patrick McHardy <kaber@trash.net>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
        struct rcu_head         rcu;
        unsigned long           masks[0];
};

/* state bits */
#define NETLINK_CONGESTED       0x0

/* flags */
#define NETLINK_KERNEL_SOCKET   0x1
#define NETLINK_RECV_PKTINFO    0x2
#define NETLINK_BROADCAST_SEND_ERROR    0x4
#define NETLINK_RECV_NO_ENOBUFS 0x8

static inline int netlink_is_kernel(struct sock *sk)
{
        return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with a per-bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU-protected lookups.
 * Destruction of the netlink socket may only occur *after* nl_table_lock has
 * been acquired - either during or after the socket has been removed from
 * the list - and after an RCU grace period.
 */
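/* A minimal reader-side sketch of the scheme described above, mirroring
 * netlink_lookup() further down in this file; the protocol index is only
 * an example:
 *
 *      rcu_read_lock();
 *      sk = __netlink_lookup(&nl_table[NETLINK_ROUTE], portid, net);
 *      if (sk)
 *              sock_hold(sk);  // pin the socket before leaving the read side
 *      rcu_read_unlock();
 */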
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static inline u32 netlink_group_mask(u32 group)
{
        return group ? 1 << (group - 1) : 0;
}
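/* Worked example: multicast group IDs are 1-based while the mask is 0-based,
 * so group 1 maps to bit 0 (0x1), group 5 maps to bit 4 (0x10), and the
 * invalid group 0 yields an empty mask.
 */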

int netlink_add_tap(struct netlink_tap *nt)
{
        if (unlikely(nt->dev->type != ARPHRD_NETLINK))
                return -EINVAL;

        spin_lock(&netlink_tap_lock);
        list_add_rcu(&nt->list, &netlink_tap_all);
        spin_unlock(&netlink_tap_lock);

        __module_get(nt->module);

        return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
        bool found = false;
        struct netlink_tap *tmp;

        spin_lock(&netlink_tap_lock);

        list_for_each_entry(tmp, &netlink_tap_all, list) {
                if (nt == tmp) {
                        list_del_rcu(&nt->list);
                        found = true;
                        goto out;
                }
        }

        pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
        spin_unlock(&netlink_tap_lock);

        if (found && nt->module)
                module_put(nt->module);

        return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
        int ret;

        ret = __netlink_remove_tap(nt);
        synchronize_net();

        return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

static bool netlink_filter_tap(const struct sk_buff *skb)
{
        struct sock *sk = skb->sk;

        /* We take the more conservative approach and
         * whitelist socket protocols that may pass.
         */
        switch (sk->sk_protocol) {
        case NETLINK_ROUTE:
        case NETLINK_USERSOCK:
        case NETLINK_SOCK_DIAG:
        case NETLINK_NFLOG:
        case NETLINK_XFRM:
        case NETLINK_FIB_LOOKUP:
        case NETLINK_NETFILTER:
        case NETLINK_GENERIC:
                return true;
        }

        return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
                                     struct net_device *dev)
{
        struct sk_buff *nskb;
        struct sock *sk = skb->sk;
        int ret = -ENOMEM;

        dev_hold(dev);
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (nskb) {
                nskb->dev = dev;
                nskb->protocol = htons((u16) sk->sk_protocol);
                nskb->pkt_type = netlink_is_kernel(sk) ?
                                 PACKET_KERNEL : PACKET_USER;
                skb_reset_network_header(nskb);
                ret = dev_queue_xmit(nskb);
                if (unlikely(ret > 0))
                        ret = net_xmit_errno(ret);
        }

        dev_put(dev);
        return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
        int ret;
        struct netlink_tap *tmp;

        if (!netlink_filter_tap(skb))
                return;

        list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
                ret = __netlink_deliver_tap_skb(skb, tmp->dev);
                if (unlikely(ret))
                        break;
        }
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
        rcu_read_lock();

        if (unlikely(!list_empty(&netlink_tap_all)))
                __netlink_deliver_tap(skb);

        rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
                                       struct sk_buff *skb)
{
        if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
                netlink_deliver_tap(skb);
}

static void netlink_overrun(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
                if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
                }
        }
        atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (skb_queue_empty(&sk->sk_receive_queue))
                clear_bit(NETLINK_CONGESTED, &nlk->state);
        if (!test_bit(NETLINK_CONGESTED, &nlk->state))
                wake_up_interruptible(&nlk->wait);
}

#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
        return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
        return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
        return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        else
                return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
        unsigned int i;

        for (i = 0; i < len; i++) {
                if (pg_vec[i] != NULL) {
                        if (is_vmalloc_addr(pg_vec[i]))
                                vfree(pg_vec[i]);
                        else
                                free_pages((unsigned long)pg_vec[i], order);
                }
        }
        kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
        void *buffer;
        gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
                          __GFP_NOWARN | __GFP_NORETRY;

        buffer = (void *)__get_free_pages(gfp_flags, order);
        if (buffer != NULL)
                return buffer;

        buffer = vzalloc((1 << order) * PAGE_SIZE);
        if (buffer != NULL)
                return buffer;

        gfp_flags &= ~__GFP_NORETRY;
        return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
                           struct nl_mmap_req *req, unsigned int order)
{
        unsigned int block_nr = req->nm_block_nr;
        unsigned int i;
        void **pg_vec;

        pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
        if (pg_vec == NULL)
                return NULL;

        for (i = 0; i < block_nr; i++) {
                pg_vec[i] = alloc_one_pg_vec_page(order);
                if (pg_vec[i] == NULL)
                        goto err1;
        }

        return pg_vec;
err1:
        free_pg_vec(pg_vec, order, block_nr);
        return NULL;
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
                            bool closing, bool tx_ring)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        struct sk_buff_head *queue;
        void **pg_vec = NULL;
        unsigned int order = 0;
        int err;

        ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
        queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

        if (!closing) {
                if (atomic_read(&nlk->mapped))
                        return -EBUSY;
                if (atomic_read(&ring->pending))
                        return -EBUSY;
        }

        if (req->nm_block_nr) {
                if (ring->pg_vec != NULL)
                        return -EBUSY;

                if ((int)req->nm_block_size <= 0)
                        return -EINVAL;
                if (!PAGE_ALIGNED(req->nm_block_size))
                        return -EINVAL;
                if (req->nm_frame_size < NL_MMAP_HDRLEN)
                        return -EINVAL;
                if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
                        return -EINVAL;

                ring->frames_per_block = req->nm_block_size /
                                         req->nm_frame_size;
                if (ring->frames_per_block == 0)
                        return -EINVAL;
                if (ring->frames_per_block * req->nm_block_nr !=
                    req->nm_frame_nr)
                        return -EINVAL;

                order = get_order(req->nm_block_size);
                pg_vec = alloc_pg_vec(nlk, req, order);
                if (pg_vec == NULL)
                        return -ENOMEM;
        } else {
                if (req->nm_frame_nr)
                        return -EINVAL;
        }

        err = -EBUSY;
        mutex_lock(&nlk->pg_vec_lock);
        if (closing || atomic_read(&nlk->mapped) == 0) {
                err = 0;
                spin_lock_bh(&queue->lock);

                ring->frame_max    = req->nm_frame_nr - 1;
                ring->head         = 0;
                ring->frame_size   = req->nm_frame_size;
                ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;

                swap(ring->pg_vec_len, req->nm_block_nr);
                swap(ring->pg_vec_order, order);
                swap(ring->pg_vec, pg_vec);

                __skb_queue_purge(queue);
                spin_unlock_bh(&queue->lock);

                WARN_ON(atomic_read(&nlk->mapped));
        }
        mutex_unlock(&nlk->pg_vec_lock);

        if (pg_vec)
                free_pg_vec(pg_vec, order, req->nm_block_nr);
        return err;
}
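/* Worked example of the geometry checks above (all values are assumptions
 * for illustration): with 4 KiB pages, nm_block_size = 16384 and
 * nm_frame_size = 2048 give frames_per_block = 16384 / 2048 = 8, so with
 * nm_block_nr = 4 the request is only accepted if nm_frame_nr = 8 * 4 = 32;
 * anything else fails with -EINVAL.
 */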

static void netlink_mm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct socket *sock = file->private_data;
        struct sock *sk = sock->sk;

        if (sk)
                atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct socket *sock = file->private_data;
        struct sock *sk = sock->sk;

        if (sk)
                atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
        .open  = netlink_mm_open,
        .close = netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
                        struct vm_area_struct *vma)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        unsigned long start, size, expected;
        unsigned int i;
        int err = -EINVAL;

        if (vma->vm_pgoff)
                return -EINVAL;

        mutex_lock(&nlk->pg_vec_lock);

        expected = 0;
        for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
                if (ring->pg_vec == NULL)
                        continue;
                expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
        }

        if (expected == 0)
                goto out;

        size = vma->vm_end - vma->vm_start;
        if (size != expected)
                goto out;

        start = vma->vm_start;
        for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
                if (ring->pg_vec == NULL)
                        continue;

                for (i = 0; i < ring->pg_vec_len; i++) {
                        struct page *page;
                        void *kaddr = ring->pg_vec[i];
                        unsigned int pg_num;

                        for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
                                page = pgvec_to_page(kaddr);
                                err = vm_insert_page(vma, start, page);
                                if (err < 0)
                                        goto out;
                                start += PAGE_SIZE;
                                kaddr += PAGE_SIZE;
                        }
                }
        }

        atomic_inc(&nlk->mapped);
        vma->vm_ops = &netlink_mmap_ops;
        err = 0;
out:
        mutex_unlock(&nlk->pg_vec_lock);
        return err;
}

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        struct page *p_start, *p_end;

        /* First page is flushed through netlink_{get,set}_status */
        p_start = pgvec_to_page((void *)hdr + PAGE_SIZE);
        p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
        while (p_start <= p_end) {
                flush_dcache_page(p_start);
                p_start++;
        }
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
        smp_rmb();
        flush_dcache_page(pgvec_to_page(hdr));
        return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
                               enum nl_mmap_status status)
{
        smp_mb();
        hdr->nm_status = status;
        flush_dcache_page(pgvec_to_page(hdr));
}
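/* A hedged sketch of the userspace half of this status handshake; the
 * variable names are assumptions, the constants come from <linux/netlink.h>:
 *
 *      struct nl_mmap_hdr *hdr = ring_base + frame_offset;
 *
 *      if (hdr->nm_status == NL_MMAP_STATUS_VALID) {
 *              void *payload = (void *)hdr + NL_MMAP_HDRLEN;
 *              consume(payload, hdr->nm_len);
 *              hdr->nm_status = NL_MMAP_STATUS_UNUSED; // hand the frame back
 *      }
 */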

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
        unsigned int pg_vec_pos, frame_off;

        pg_vec_pos = pos / ring->frames_per_block;
        frame_off  = pos % ring->frames_per_block;

        return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
                     enum nl_mmap_status status)
{
        struct nl_mmap_hdr *hdr;

        hdr = __netlink_lookup_frame(ring, pos);
        if (netlink_get_status(hdr) != status)
                return NULL;

        return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
                      enum nl_mmap_status status)
{
        return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
                       enum nl_mmap_status status)
{
        unsigned int prev;

        prev = ring->head ? ring->head - 1 : ring->frame_max;
        return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
        ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
        unsigned int head = ring->head;
        const struct nl_mmap_hdr *hdr;

        do {
                /* Re-examine the frame at the current head each time round,
                 * so that head actually advances past SKIP'ed frames.
                 */
                hdr = __netlink_lookup_frame(ring, ring->head);
                if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
                        break;
                if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
                        break;
                netlink_increment_head(ring);
        } while (ring->head != head);
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
        struct netlink_ring *ring = &nlk->rx_ring;
        struct nl_mmap_hdr *hdr;
        unsigned int n;

        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL)
                return false;

        n = ring->head + ring->frame_max / 2;
        if (n > ring->frame_max)
                n -= ring->frame_max;

        hdr = __netlink_lookup_frame(ring, n);

        return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}
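/* Worked example of the probe above (ring size is an assumption): with 32
 * frames (frame_max = 31) and head = 20, n = 20 + 31 / 2 = 35, which wraps
 * to 35 - 31 = 4; the dump may continue only if both the head frame and
 * that frame, roughly half a ring ahead, are still unused.
 */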

static unsigned int netlink_poll(struct file *file, struct socket *sock,
                                 poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        unsigned int mask;
        int err;

        if (nlk->rx_ring.pg_vec != NULL) {
                /* Memory mapped sockets don't call recvmsg(), so flow control
                 * for dumps is performed here. A dump is allowed to continue
                 * if at least half the ring is unused.
                 */
                while (nlk->cb_running && netlink_dump_space(nlk)) {
                        err = netlink_dump(sk);
                        if (err < 0) {
                                sk->sk_err = -err;
                                sk->sk_error_report(sk);
                                break;
                        }
                }
                netlink_rcv_wake(sk);
        }

        mask = datagram_poll(file, sock, wait);

        spin_lock_bh(&sk->sk_receive_queue.lock);
        if (nlk->rx_ring.pg_vec) {
                netlink_forward_ring(&nlk->rx_ring);
                if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
                        mask |= POLLIN | POLLRDNORM;
        }
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        spin_lock_bh(&sk->sk_write_queue.lock);
        if (nlk->tx_ring.pg_vec) {
                if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
                        mask |= POLLOUT | POLLWRNORM;
        }
        spin_unlock_bh(&sk->sk_write_queue.lock);

        return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
        return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
                                   struct netlink_ring *ring,
                                   struct nl_mmap_hdr *hdr)
{
        unsigned int size;
        void *data;

        size = ring->frame_size - NL_MMAP_HDRLEN;
        data = (void *)hdr + NL_MMAP_HDRLEN;

        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end  = skb->tail + size;
        skb->len  = 0;

        skb->destructor = netlink_skb_destructor;
        NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
        NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
                                u32 dst_portid, u32 dst_group,
                                struct sock_iocb *siocb)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        struct nl_mmap_hdr *hdr;
        struct sk_buff *skb;
        unsigned int maxlen;
        int err = 0, len = 0;

        mutex_lock(&nlk->pg_vec_lock);

        ring   = &nlk->tx_ring;
        maxlen = ring->frame_size - NL_MMAP_HDRLEN;

        do {
                unsigned int nm_len;

                hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
                if (hdr == NULL) {
                        if (!(msg->msg_flags & MSG_DONTWAIT) &&
                            atomic_read(&nlk->tx_ring.pending))
                                schedule();
                        continue;
                }

                nm_len = ACCESS_ONCE(hdr->nm_len);
                if (nm_len > maxlen) {
                        err = -EINVAL;
                        goto out;
                }

                netlink_frame_flush_dcache(hdr, nm_len);

                skb = alloc_skb(nm_len, GFP_KERNEL);
                if (skb == NULL) {
                        err = -ENOBUFS;
                        goto out;
                }
                __skb_put(skb, nm_len);
                memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
                netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

                netlink_increment_head(ring);

                NETLINK_CB(skb).portid    = nlk->portid;
                NETLINK_CB(skb).dst_group = dst_group;
                NETLINK_CB(skb).creds     = siocb->scm->creds;

                err = security_netlink_send(sk, skb);
                if (err) {
                        kfree_skb(skb);
                        goto out;
                }

                if (unlikely(dst_group)) {
                        atomic_inc(&skb->users);
                        netlink_broadcast(sk, skb, dst_portid, dst_group,
                                          GFP_KERNEL);
                }
                err = netlink_unicast(sk, skb, dst_portid,
                                      msg->msg_flags & MSG_DONTWAIT);
                if (err < 0)
                        goto out;
                len += err;

        } while (hdr != NULL ||
                 (!(msg->msg_flags & MSG_DONTWAIT) &&
                  atomic_read(&nlk->tx_ring.pending)));

        if (len > 0)
                err = len;
out:
        mutex_unlock(&nlk->pg_vec_lock);
        return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
        struct nl_mmap_hdr *hdr;

        hdr = netlink_mmap_hdr(skb);
        hdr->nm_len   = skb->len;
        hdr->nm_group = NETLINK_CB(skb).dst_group;
        hdr->nm_pid   = NETLINK_CB(skb).creds.pid;
        hdr->nm_uid   = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
        hdr->nm_gid   = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
        netlink_frame_flush_dcache(hdr, hdr->nm_len);
        netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

        NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
        kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring = &nlk->rx_ring;
        struct nl_mmap_hdr *hdr;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL) {
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                kfree_skb(skb);
                netlink_overrun(sk);
                return;
        }
        netlink_increment_head(ring);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        hdr->nm_len   = skb->len;
        hdr->nm_group = NETLINK_CB(skb).dst_group;
        hdr->nm_pid   = NETLINK_CB(skb).creds.pid;
        hdr->nm_uid   = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
        hdr->nm_gid   = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
        netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)      false
#define netlink_rx_is_mmaped(sk)        false
#define netlink_tx_is_mmaped(sk)        false
#define netlink_mmap                    sock_no_mmap
#define netlink_poll                    datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)    0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
        struct nl_mmap_hdr *hdr;
        struct netlink_ring *ring;
        struct sock *sk;

        /* If a packet from the kernel to userspace was freed because of an
         * error without being delivered to userspace, the kernel must reset
         * the status. In the direction userspace to kernel, the status is
         * always reset here after the packet was processed and freed.
         */
        if (netlink_skb_is_mmaped(skb)) {
                hdr = netlink_mmap_hdr(skb);
                sk = NETLINK_CB(skb).sk;

                if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
                        netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
                        ring = &nlk_sk(sk)->tx_ring;
                } else {
                        if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
                                hdr->nm_len = 0;
                                netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
                        }
                        ring = &nlk_sk(sk)->rx_ring;
                }

                WARN_ON(atomic_read(&ring->pending) == 0);
                atomic_dec(&ring->pending);
                sock_put(sk);

                skb->head = NULL;
        }
#endif
        if (is_vmalloc_addr(skb->head)) {
                if (!skb->cloned ||
                    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
                        vfree(skb->head);

                skb->head = NULL;
        }
        if (skb->sk != NULL)
                sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
        WARN_ON(skb->sk != NULL);
        skb->sk = sk;
        skb->destructor = netlink_skb_destructor;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
        sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->cb_running) {
                if (nlk->cb.done)
                        nlk->cb.done(&nlk->cb);

                module_put(nlk->cb.module);
                kfree_skb(nlk->cb.skb);
        }

        skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
        if (1) {
                struct nl_mmap_req req;

                memset(&req, 0, sizeof(req));
                if (nlk->rx_ring.pg_vec)
                        netlink_set_ring(sk, &req, true, false);
                memset(&req, 0, sizeof(req));
                if (nlk->tx_ring.pg_vec)
                        netlink_set_ring(sk, &req, true, true);
        }
#endif /* CONFIG_NETLINK_MMAP */

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
                return;
        }

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
        __acquires(nl_table_lock)
{
        might_sleep();

        write_lock_irq(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                                break;
                        write_unlock_irq(&nl_table_lock);
                        schedule();
                        write_lock_irq(&nl_table_lock);
                }

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);
        }
}

void netlink_table_ungrab(void)
        __releases(nl_table_lock)
{
        write_unlock_irq(&nl_table_lock);
        wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
        /* read_lock() synchronizes us to netlink_table_grab */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
}
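/* A minimal sketch of how the two levels pair up; the callers named are
 * examples, not an exhaustive list:
 *
 *      netlink_table_grab();           // exclusive side, may sleep:
 *      ...                             // e.g. bind updating mc_list
 *      netlink_table_ungrab();
 *
 *      netlink_lock_table();           // shared side: holds off grabbers
 *      ...                             // e.g. netlink_create probing nl_table
 *      netlink_unlock_table();
 */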

struct netlink_compare_arg {
        struct net *net;
        u32 portid;
};

static bool netlink_compare(void *ptr, void *arg)
{
        struct netlink_compare_arg *x = arg;
        struct sock *sk = ptr;

        return nlk_sk(sk)->portid == x->portid &&
               net_eq(sock_net(sk), x->net);
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
                                     struct net *net)
{
        struct netlink_compare_arg arg = {
                .net = net,
                .portid = portid,
        };

        return rhashtable_lookup_compare(&table->hash, &portid,
                                         &netlink_compare, &arg);
}

static bool __netlink_insert(struct netlink_table *table, struct sock *sk,
                             struct net *net)
{
        struct netlink_compare_arg arg = {
                .net = net,
                .portid = nlk_sk(sk)->portid,
        };

        return rhashtable_lookup_compare_insert(&table->hash,
                                                &nlk_sk(sk)->node,
                                                &netlink_compare, &arg);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
        struct netlink_table *table = &nl_table[protocol];
        struct sock *sk;

        rcu_read_lock();
        sk = __netlink_lookup(table, portid, net);
        if (sk)
                sock_hold(sk);
        rcu_read_unlock();

        return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
        struct netlink_table *tbl = &nl_table[sk->sk_protocol];
        unsigned long mask;
        unsigned int i;
        struct listeners *listeners;

        listeners = nl_deref_protected(tbl->listeners);
        if (!listeners)
                return;

        for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
                mask = 0;
                sk_for_each_bound(sk, &tbl->mc_list) {
                        if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
                                mask |= nlk_sk(sk)->groups[i];
                }
                listeners->masks[i] = mask;
        }
        /* this function is only called with the netlink table "grabbed",
         * which makes sure updates are visible before bind or setsockopt
         * return. */
}

static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
{
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        int err;

        lock_sock(sk);

        err = -EBUSY;
        if (nlk_sk(sk)->portid)
                goto err;

        err = -ENOMEM;
        if (BITS_PER_LONG > 32 &&
            unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
                goto err;

        nlk_sk(sk)->portid = portid;
        sock_hold(sk);

        err = 0;
        if (!__netlink_insert(table, sk, net)) {
                err = -EADDRINUSE;
                sock_put(sk);
        }

err:
        release_sock(sk);
        return err;
}

static void netlink_remove(struct sock *sk)
{
        struct netlink_table *table;

        table = &nl_table[sk->sk_protocol];
        if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
                WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }

        netlink_table_grab();
        if (nlk_sk(sk)->subscriptions) {
                __sk_del_bind_node(sk);
                netlink_update_listeners(sk);
        }
        netlink_table_ungrab();
}

static struct proto netlink_proto = {
        .name     = "NETLINK",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
                            struct mutex *cb_mutex, int protocol)
{
        struct sock *sk;
        struct netlink_sock *nlk;

        sock->ops = &netlink_ops;

        sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        nlk = nlk_sk(sk);
        if (cb_mutex) {
                nlk->cb_mutex = cb_mutex;
        } else {
                nlk->cb_mutex = &nlk->cb_def_mutex;
                mutex_init(nlk->cb_mutex);
        }
        init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
        mutex_init(&nlk->pg_vec_lock);
#endif

        sk->sk_destruct = netlink_sock_destruct;
        sk->sk_protocol = protocol;
        return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
                          int kern)
{
        struct module *module = NULL;
        struct mutex *cb_mutex;
        struct netlink_sock *nlk;
        int (*bind)(struct net *net, int group);
        void (*unbind)(struct net *net, int group);
        int err = 0;

        sock->state = SS_UNCONNECTED;

        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;

        netlink_lock_table();
#ifdef CONFIG_MODULES
        if (!nl_table[protocol].registered) {
                netlink_unlock_table();
                request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
                netlink_lock_table();
        }
#endif
        if (nl_table[protocol].registered &&
            try_module_get(nl_table[protocol].module))
                module = nl_table[protocol].module;
        else
                err = -EPROTONOSUPPORT;
        cb_mutex = nl_table[protocol].cb_mutex;
        bind = nl_table[protocol].bind;
        unbind = nl_table[protocol].unbind;
        netlink_unlock_table();

        if (err < 0)
                goto out;

        err = __netlink_create(net, sock, cb_mutex, protocol);
        if (err < 0)
                goto out_module;

        local_bh_disable();
        sock_prot_inuse_add(net, &netlink_proto, 1);
        local_bh_enable();

        nlk = nlk_sk(sock->sk);
        nlk->module = module;
        nlk->netlink_bind = bind;
        nlk->netlink_unbind = unbind;
out:
        return err;

out_module:
        module_put(module);
        goto out;
}

static void deferred_put_nlk_sk(struct rcu_head *head)
{
        struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

        sock_put(&nlk->sk);
}

static int netlink_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk;

        if (!sk)
                return 0;

        netlink_remove(sk);
        sock_orphan(sk);
        nlk = nlk_sk(sk);

        /*
         * OK. Socket is unlinked, any packets that arrive now
         * will be purged.
         */

        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);

        skb_queue_purge(&sk->sk_write_queue);

        if (nlk->portid) {
                struct netlink_notify n = {
                        .net      = sock_net(sk),
                        .protocol = sk->sk_protocol,
                        .portid   = nlk->portid,
                };
                atomic_notifier_call_chain(&netlink_chain,
                                           NETLINK_URELEASE, &n);
        }

        module_put(nlk->module);

        if (netlink_is_kernel(sk)) {
                netlink_table_grab();
                BUG_ON(nl_table[sk->sk_protocol].registered == 0);
                if (--nl_table[sk->sk_protocol].registered == 0) {
                        struct listeners *old;

                        old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
                        RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
                        kfree_rcu(old, rcu);
                        nl_table[sk->sk_protocol].module = NULL;
                        nl_table[sk->sk_protocol].bind = NULL;
                        nl_table[sk->sk_protocol].unbind = NULL;
                        nl_table[sk->sk_protocol].flags = 0;
                        nl_table[sk->sk_protocol].registered = 0;
                }
                netlink_table_ungrab();
        }

        if (nlk->netlink_unbind) {
                int i;

                for (i = 0; i < nlk->ngroups; i++)
                        if (test_bit(i, nlk->groups))
                                nlk->netlink_unbind(sock_net(sk), i + 1);
        }
        kfree(nlk->groups);
        nlk->groups = NULL;

        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
        local_bh_enable();
        call_rcu(&nlk->rcu, deferred_put_nlk_sk);
        return 0;
}

static int netlink_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        s32 portid = task_tgid_vnr(current);
        int err;
        static s32 rover = -4097;

retry:
        cond_resched();
        rcu_read_lock();
        if (__netlink_lookup(table, portid, net)) {
                /* Bind collision, search negative portid values. */
                portid = rover--;
                if (rover > -4097)
                        rover = -4097;
                rcu_read_unlock();
                goto retry;
        }
        rcu_read_unlock();

        err = netlink_insert(sk, net, portid);
        if (err == -EADDRINUSE)
                goto retry;

        /* If 2 threads race to autobind, that is fine. */
        if (err == -EBUSY)
                err = 0;

        return err;
}

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has the capability @cap in the
 * user namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
                          struct user_namespace *user_ns, int cap)
{
        return ((nsp->flags & NETLINK_SKB_DST) ||
                file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
               ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has the capability @cap in the
 * user namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
                        struct user_namespace *user_ns, int cap)
{
        return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has the capability @cap in all
 * user namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
        return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has the capability @cap over
 * the network namespace of the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
        return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);
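/* Illustrative sketch (an assumed handler, not code in this file): a
 * message handler gating a privileged operation on the sender:
 *
 *      static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 *      {
 *              if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *                      return -EPERM;
 *              // ...perform the privileged change...
 *              return 0;
 *      }
 */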

static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
        return (nl_table[sock->sk->sk_protocol].flags & flag) ||
               ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->subscriptions && !subscriptions)
                __sk_del_bind_node(sk);
        else if (!nlk->subscriptions && subscriptions)
                sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
        nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        unsigned int groups;
        unsigned long *new_groups;
        int err = 0;

        netlink_table_grab();

        groups = nl_table[sk->sk_protocol].groups;
        if (!nl_table[sk->sk_protocol].registered) {
                err = -ENOENT;
                goto out_unlock;
        }

        if (nlk->ngroups >= groups)
                goto out_unlock;

        new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
        if (new_groups == NULL) {
                err = -ENOMEM;
                goto out_unlock;
        }
        memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
               NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

        nlk->groups = new_groups;
        nlk->ngroups = groups;
out_unlock:
        netlink_table_ungrab();
        return err;
}

static void netlink_undo_bind(int group, unsigned long groups,
                              struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        int undo;

        if (!nlk->netlink_unbind)
                return;

        for (undo = 0; undo < group; undo++)
                if (test_bit(undo, &groups))
                        nlk->netlink_unbind(sock_net(sk), undo);
}

Patrick McHardy6ac552f2007-12-04 00:19:38 -08001435static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1436 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437{
1438 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001439 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 struct netlink_sock *nlk = nlk_sk(sk);
1441 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1442 int err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001443 long unsigned int groups = nladdr->nl_groups;
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001444
Hannes Frederic Sowa4e4b5372012-12-15 15:42:19 +00001445 if (addr_len < sizeof(struct sockaddr_nl))
1446 return -EINVAL;
1447
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 if (nladdr->nl_family != AF_NETLINK)
1449 return -EINVAL;
1450
1451 /* Only superuser is allowed to listen multicasts */
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001452 if (groups) {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001453 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy513c2502005-09-06 15:43:59 -07001454 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001455 err = netlink_realloc_groups(sk);
1456 if (err)
1457 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001458 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001460 if (nlk->portid)
Eric W. Biederman15e47302012-09-07 20:12:54 +00001461 if (nladdr->nl_pid != nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 return -EINVAL;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001463
1464 if (nlk->netlink_bind && groups) {
1465 int group;
1466
1467 for (group = 0; group < nlk->ngroups; group++) {
1468 if (!test_bit(group, &groups))
1469 continue;
Johannes Berg023e2cf2014-12-23 21:00:06 +01001470 err = nlk->netlink_bind(net, group);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001471 if (!err)
1472 continue;
Johannes Berg023e2cf2014-12-23 21:00:06 +01001473 netlink_undo_bind(group, groups, sk);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001474 return err;
1475 }
1476 }
1477
1478 if (!nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 err = nladdr->nl_pid ?
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001480 netlink_insert(sk, net, nladdr->nl_pid) :
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 netlink_autobind(sock);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001482 if (err) {
Johannes Berg023e2cf2014-12-23 21:00:06 +01001483 netlink_undo_bind(nlk->ngroups, groups, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 return err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001485 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 }
1487
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001488 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 return 0;
1490
1491 netlink_table_grab();
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001492 netlink_update_subscriptions(sk, nlk->subscriptions +
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001493 hweight32(groups) -
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001494 hweight32(nlk->groups[0]));
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001495 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08001496 netlink_update_listeners(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 netlink_table_ungrab();
1498
1499 return 0;
1500}
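
/*
 * Userspace counterpart of the bind path above, as a hedged sketch (not
 * part of this file): nl_pid is the port id (0 asks the kernel to
 * autobind) and nl_groups is the legacy 32-bit multicast bitmask merged
 * into nlk->groups[0] above; RTMGRP_LINK is group 1 of NETLINK_ROUTE.
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,
 *		.nl_groups = RTMGRP_LINK,
 *	};
 *
 *	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) == -1)
 *		perror("bind");
 */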

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state = NETLINK_UNCONNECTED;
		nlk->dst_portid = 0;
		nlk->dst_group = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	if ((nladdr->nl_groups || nladdr->nl_pid) &&
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	if (!nlk->portid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state = NETLINK_CONNECTED;
		nlk->dst_portid = nladdr->nl_pid;
		nlk->dst_group = ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
					       int broadcast)
{
	struct sk_buff *skb;
	void *data;

	if (size <= NLMSG_GOODSIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = vmalloc(size);
	if (data == NULL)
		return NULL;

	skb = build_skb(data, size);
	if (skb == NULL)
		vfree(data);
	else {
		skb->head_frag = 0;
		skb->destructor = netlink_skb_destructor;
	}

	return skb;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; all error
 * checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
	    !netlink_skb_is_mmaped(skb)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}
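
/*
 * Sketch of the retry contract documented above, from the point of view
 * of a caller outside this file (ipc/mqueue.c follows this shape; 'filp'
 * and 'skb' are assumed to be in scope). A return of 1 means the socket
 * reference was dropped while sleeping, so the lookup must be redone.
 *
 *	struct sock *sk;
 *	long timeo = MAX_SCHEDULE_TIMEOUT;
 *	int ret;
 *
 * retry:
 *	sk = netlink_getsockbyfilp(filp);
 *	if (IS_ERR(sk))
 *		return PTR_ERR(sk);
 *	ret = netlink_attachskb(sk, skb, &timeo, NULL);
 *	if (ret == 1)
 *		goto retry;
 *	if (ret < 0)
 *		return ret;
 *	return netlink_sendskb(sk, skb);
 */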

static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	netlink_deliver_tap(skb);

#ifdef CONFIG_NETLINK_MMAP
	if (netlink_skb_is_mmaped(skb))
		netlink_queue_mmaped_skb(sk, skb);
	else if (netlink_rx_is_mmaped(sk))
		netlink_ring_set_copied(sk, skb);
	else
#endif /* CONFIG_NETLINK_MMAP */
		skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);
	if (netlink_skb_is_mmaped(skb))
		return skb;

	delta = skb->end - skb->tail;
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		netlink_deliver_tap_kernel(sk, ssk, skb);
		nlk->netlink_rcv(skb);
		consume_skb(skb);
	} else {
		kfree_skb(skb);
	}
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbyportid(ssk, portid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
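
/*
 * Minimal sketch of the usual kernel-side reply pattern built on
 * netlink_unicast(); 'nl_sk' (kernel socket), 'in_skb' (the request),
 * 'payload'/'payload_len' and MY_MSG_TYPE are placeholders. Most callers
 * use the nlmsg_unicast() helper, which wraps this call with MSG_DONTWAIT.
 *
 *	struct sk_buff *skb = nlmsg_new(payload_len, GFP_KERNEL);
 *	struct nlmsghdr *nlh;
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	nlh = nlmsg_put(skb, 0, 0, MY_MSG_TYPE, payload_len, 0);
 *	memcpy(nlmsg_data(nlh), payload, payload_len);
 *	return netlink_unicast(nl_sk, skb, NETLINK_CB(in_skb).portid,
 *			       MSG_DONTWAIT);
 */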

struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
				  u32 dst_portid, gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
	struct sock *sk = NULL;
	struct sk_buff *skb;
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	unsigned int maxlen;

	sk = netlink_getsockbyportid(ssk, dst_portid);
	if (IS_ERR(sk))
		goto out;

	ring = &nlk_sk(sk)->rx_ring;
	/* fast-path without atomic ops for common case: non-mmaped receiver */
	if (ring->pg_vec == NULL)
		goto out_put;

	if (ring->frame_size - NL_MMAP_HDRLEN < size)
		goto out_put;

	skb = alloc_skb_head(gfp_mask);
	if (skb == NULL)
		goto err1;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* check again under lock */
	if (ring->pg_vec == NULL)
		goto out_free;

	/* check again under lock */
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
	if (maxlen < size)
		goto out_free;

	netlink_forward_ring(ring);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		goto err2;
	netlink_ring_setup_skb(skb, sk, ring, hdr);
	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
	atomic_inc(&ring->pending);
	netlink_increment_head(ring);

	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return skb;

err2:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	netlink_overrun(sk);
err1:
	sock_put(sk);
	return NULL;

out_free:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
out_put:
	sock_put(sk);
out:
#endif
	return alloc_skb(size, gfp_mask);
}
EXPORT_SYMBOL_GPL(netlink_alloc_skb);

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
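
/*
 * Typical use of netlink_has_listeners(), sketched: skip building an
 * expensive notification when nobody has joined the group. 'nl_sk',
 * GROUP_ID and build_notification() are placeholders.
 *
 *	if (!netlink_has_listeners(nl_sk, GROUP_ID))
 *		return;
 *	skb = build_notification();
 *	if (skb)
 *		netlink_broadcast(nl_sk, skb, 0, GROUP_ID, GFP_KERNEL);
 */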

static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 portid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};

static void do_one_broadcast(struct sock *sk,
			     struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		return;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		return;

	if (!net_eq(sock_net(sk), p->net))
		return;

	if (p->failure) {
		netlink_overrun(sk);
		return;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);
}

int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.portid = portid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
					  NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
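
/*
 * Broadcast return-value handling, sketched: 'portid' names a socket to
 * skip (0 is the conventional "none"), and -ESRCH only reports that the
 * group had no listeners, which callers normally do not treat as an error.
 * 'nl_sk', 'skb' and GROUP_ID are placeholders.
 *
 *	err = netlink_broadcast(nl_sk, skb, 0, GROUP_ID, GFP_KERNEL);
 *	if (err && err != -ESRCH)
 *		pr_warn("notification dropped: %d\n", err);
 */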

struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 portid;
	u32 group;
	int code;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.portid = portid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);

/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
	    optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
			err = nlk->netlink_bind(sock_net(sk), val);
			if (err)
				return err;
		}
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
			nlk->netlink_unbind(sock_net(sk), val);

		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else {
			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
		}
		err = 0;
		break;
#ifdef CONFIG_NETLINK_MMAP
	case NETLINK_RX_RING:
	case NETLINK_TX_RING: {
		struct nl_mmap_req req;

		/* Rings might consume more memory than queue limits, require
		 * CAP_NET_ADMIN.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		err = netlink_set_ring(sk, &req, false,
				       optname == NETLINK_TX_RING);
		break;
	}
#endif /* CONFIG_NETLINK_MMAP */
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
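
/*
 * Userspace view of the membership option above (sketch; 'fd' is an open
 * netlink socket): unlike the 32-bit nl_groups bitmask in sockaddr_nl,
 * NETLINK_ADD_MEMBERSHIP takes a group *number*, which is how groups
 * beyond 32 stay reachable.
 *
 *	unsigned int group = 33;
 *
 *	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		       &group, sizeof(group)) == -1)
 *		perror("NETLINK_ADD_MEMBERSHIP");
 */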

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
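
/*
 * Userspace side of the control message above (sketch; 'msg' is the
 * struct msghdr just filled by recvmsg(), and NETLINK_PKTINFO was enabled
 * with setsockopt() beforehand): the destination group of each message is
 * recovered from the cmsg stream.
 *
 *	struct cmsghdr *cmsg;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_NETLINK &&
 *		    cmsg->cmsg_type == NETLINK_PKTINFO) {
 *			struct nl_pktinfo *pi = (void *)CMSG_DATA(cmsg);
 *
 *			printf("delivered via group %u\n", pi->group);
 *		}
 *	}
 */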

static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
	u32 dst_portid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;
	u32 netlink_skb_flags = 0;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;

	err = scm_send(sock, msg, siocb->scm, true);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		err = -EINVAL;
		if (addr->nl_family != AF_NETLINK)
			goto out;
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		err = -EPERM;
		if ((dst_group || dst_portid) &&
		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
			goto out;
		netlink_skb_flags |= NETLINK_SKB_DST;
	} else {
		dst_portid = nlk->dst_portid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->portid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	if (netlink_tx_is_mmaped(sk) &&
	    msg->msg_iter.iov->iov_base == NULL) {
		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
					   siocb);
		goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = netlink_alloc_large_skb(len, dst_group);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).portid = nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds = siocb->scm->creds;
	NETLINK_CB(skb).flags = netlink_skb_flags;

	err = -EFAULT;
	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);

out:
	scm_destroy(siocb->scm);
	return err;
}

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *data_skb;
	int err, ret;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, then here that means that we
		 * will have to use the frag_list skb's data for compat tasks
		 * and the regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	/* Record the max length of recvmsg() calls for future allocations */
	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
				     16384);

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid = NETLINK_CB(skb).portid;
		addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb_running &&
	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = -ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

static void netlink_data_ready(struct sock *sk)
{
	BUG();
}

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	/*
	 * We have to just have a reference on the net from sk, but don't
	 * get_net it. Besides, we cannot get and then put the net here.
	 * So we create one inside init_net and then move it to net.
	 */

	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;
	sk_change_net(sk, net);

	if (!cfg || cfg->groups < 32)
		groups = 32;
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, net, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		if (cfg) {
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].unbind = cfg->unbind;
			nl_table[unit].flags = cfg->flags;
			if (cfg->compare)
				nl_table[unit].compare = cfg->compare;
		}
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);

void
netlink_kernel_release(struct sock *sk)
{
	sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);
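
/*
 * Registering a kernel-side netlink family, as a sketch; NETLINK_MYPROTO
 * and the my_* names are placeholders. netlink_kernel_create() is the
 * usual wrapper that passes THIS_MODULE to __netlink_kernel_create().
 *
 *	static struct sock *nl_sk;
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		pr_info("netlink request, %u bytes\n", skb->len);
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		struct netlink_kernel_cfg cfg = {
 *			.groups = 32,
 *			.input  = my_input,
 *		};
 *
 *		nl_sk = netlink_kernel_create(&init_net, NETLINK_MYPROTO, &cfg);
 *		return nl_sk ? 0 : -ENOMEM;
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		netlink_kernel_release(nl_sk);
 *	}
 */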

int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}

/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}

void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}

struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);
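
/*
 * Message layout produced above, sketched: a 16-byte struct nlmsghdr
 * followed by 'len' payload bytes, padded to a 4-byte NLMSG_ALIGN()
 * boundary. Callers normally use the nlmsg_put() wrapper, which also
 * checks for tailroom; MY_MSG_TYPE and struct my_payload are placeholders.
 *
 *	struct nlmsghdr *nlh;
 *	struct my_payload *p;
 *
 *	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE, sizeof(*p), 0);
 *	if (!nlh)
 *		return -EMSGSIZE;
 *	p = nlmsg_data(nlh);
 *	p->value = 42;
 */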
2603
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604/*
2605 * It looks a bit ugly.
2606 * It would be better to create kernel thread.
2607 */
2608
2609static int netlink_dump(struct sock *sk)
2610{
2611 struct netlink_sock *nlk = nlk_sk(sk);
2612 struct netlink_callback *cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002613 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 struct nlmsghdr *nlh;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002615 int len, err = -ENOBUFS;
Greg Rosec7ac8672011-06-10 01:27:09 +00002616 int alloc_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002618 mutex_lock(nlk->cb_mutex);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002619 if (!nlk->cb_running) {
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002620 err = -EINVAL;
2621 goto errout_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622 }
2623
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002624 cb = &nlk->cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002625 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2626
Patrick McHardyf9c22882013-04-17 06:47:04 +00002627 if (!netlink_rx_is_mmaped(sk) &&
2628 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2629 goto errout_skb;
Eric Dumazet9063e212014-03-07 12:02:33 -08002630
2631 /* NLMSG_GOODSIZE is small to avoid high order allocations being
2632 * required, but it makes sense to _attempt_ a 16K bytes allocation
2633 * to reduce number of system calls on dump operations, if user
2634 * ever provided a big enough buffer.
2635 */
2636 if (alloc_size < nlk->max_recvmsg_len) {
2637 skb = netlink_alloc_skb(sk,
2638 nlk->max_recvmsg_len,
2639 nlk->portid,
2640 GFP_KERNEL |
2641 __GFP_NOWARN |
2642 __GFP_NORETRY);
2643 /* available room should be exact amount to avoid MSG_TRUNC */
2644 if (skb)
2645 skb_reserve(skb, skb_tailroom(skb) -
2646 nlk->max_recvmsg_len);
2647 }
2648 if (!skb)
2649 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2650 GFP_KERNEL);
Greg Rosec7ac8672011-06-10 01:27:09 +00002651 if (!skb)
Dan Carpenterc63d6ea2011-06-15 03:11:42 +00002652 goto errout_skb;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002653 netlink_skb_set_owner_r(skb, sk);
Greg Rosec7ac8672011-06-10 01:27:09 +00002654
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655 len = cb->dump(skb, cb);
2656
2657 if (len > 0) {
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002658 mutex_unlock(nlk->cb_mutex);
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002659
2660 if (sk_filter(sk, skb))
2661 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002662 else
2663 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664 return 0;
2665 }
2666
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002667 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2668 if (!nlh)
2669 goto errout_skb;
2670
Johannes Berg670dc282011-06-20 13:40:46 +02002671 nl_dump_check_consistent(cb, nlh);
2672
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002673 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2674
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002675 if (sk_filter(sk, skb))
2676 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002677 else
2678 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679
Thomas Grafa8f74b22005-11-10 02:25:52 +01002680 if (cb->done)
2681 cb->done(cb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002683 nlk->cb_running = false;
2684 mutex_unlock(nlk->cb_mutex);
Gao feng6dc878a2012-10-04 20:15:48 +00002685 module_put(cb->module);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002686 consume_skb(cb->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 return 0;
Thomas Graf17977542005-06-18 22:53:48 -07002688
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002689errout_skb:
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002690 mutex_unlock(nlk->cb_mutex);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002691 kfree_skb(skb);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002692 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693}
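
/*
 * Sketch, not part of this file: the contract netlink_dump() expects from
 * cb->dump. The callback fills the skb and returns a positive length while
 * more data remains; netlink_dump() is then re-entered on each recvmsg()
 * until the callback returns 0, at which point the NLMSG_DONE message above
 * closes the multipart dump. Identifiers prefixed my_/MY_ are illustrative
 * assumptions, not kernel APIs.
 */
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;

	for (idx = cb->args[0]; idx < MY_OBJ_COUNT; idx++) {
		if (my_fill_one(skb, cb->nlh->nlmsg_seq, idx) < 0)
			break;		/* skb full; resume from idx next time */
	}
	cb->args[0] = idx;		/* position persists across invocations */
	return idx < MY_OBJ_COUNT ? skb->len : 0;
}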
2694
Gao feng6dc878a2012-10-04 20:15:48 +00002695int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2696 const struct nlmsghdr *nlh,
2697 struct netlink_dump_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698{
2699 struct netlink_callback *cb;
2700 struct sock *sk;
2701 struct netlink_sock *nlk;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002702 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703
Patrick McHardyf9c22882013-04-17 06:47:04 +00002704 /* Memory mapped dump requests need to be copied to avoid looping
2705	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2706 * a reference to the skb.
2707 */
2708 if (netlink_skb_is_mmaped(skb)) {
2709 skb = skb_copy(skb, GFP_KERNEL);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002710 if (skb == NULL)
Patrick McHardyf9c22882013-04-17 06:47:04 +00002711 return -ENOBUFS;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002712 } else
2713 atomic_inc(&skb->users);
2714
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002715 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2716 if (sk == NULL) {
2717 ret = -ECONNREFUSED;
2718 goto error_free;
2719 }
2720
2721 nlk = nlk_sk(sk);
2722 mutex_lock(nlk->cb_mutex);
2723 /* A dump is in progress... */
2724 if (nlk->cb_running) {
2725 ret = -EBUSY;
2726 goto error_unlock;
2727 }
2728	/* take a reference on the module which cb->dump belongs to */
2729 if (!try_module_get(control->module)) {
2730 ret = -EPROTONOSUPPORT;
2731 goto error_unlock;
2732 }
2733
2734 cb = &nlk->cb;
2735 memset(cb, 0, sizeof(*cb));
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002736 cb->dump = control->dump;
2737 cb->done = control->done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 cb->nlh = nlh;
Pablo Neira Ayuso7175c882012-02-24 14:30:16 +00002739 cb->data = control->data;
Gao feng6dc878a2012-10-04 20:15:48 +00002740 cb->module = control->module;
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002741 cb->min_dump_alloc = control->min_dump_alloc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742 cb->skb = skb;
2743
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002744 nlk->cb_running = true;
Gao feng6dc878a2012-10-04 20:15:48 +00002745
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002746 mutex_unlock(nlk->cb_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747
Andrey Vaginb44d2112011-02-21 02:40:47 +00002748 ret = netlink_dump(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 sock_put(sk);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002750
Andrey Vaginb44d2112011-02-21 02:40:47 +00002751 if (ret)
2752 return ret;
2753
Denis V. Lunev5c582982007-10-23 20:29:25 -07002754	/* We successfully started a dump; by returning -EINTR we
2755	 * signal not to send an ACK even if one was requested.
2756 */
2757 return -EINTR;
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002758
2759error_unlock:
2760 sock_put(sk);
2761 mutex_unlock(nlk->cb_mutex);
2762error_free:
2763 kfree_skb(skb);
2764 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765}
Gao feng6dc878a2012-10-04 20:15:48 +00002766EXPORT_SYMBOL(__netlink_dump_start);
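
/*
 * Sketch of an assumed caller: a request handler hands an NLM_F_DUMP
 * request to this machinery through the netlink_dump_start() wrapper
 * (include/linux/netlink.h), which supplies THIS_MODULE as control->module
 * before calling __netlink_dump_start(). The -EINTR result is passed up on
 * purpose so netlink_rcv_skb() below skips the ACK; my_sock and my_dump are
 * illustrative assumptions.
 */
static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = my_dump,
		};

		return netlink_dump_start(my_sock, skb, nlh, &c);
	}
	return -EOPNOTSUPP;
}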
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767
2768void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2769{
2770 struct sk_buff *skb;
2771 struct nlmsghdr *rep;
2772 struct nlmsgerr *errmsg;
Thomas Graf339bf982006-11-10 14:10:15 -08002773 size_t payload = sizeof(*errmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774
Thomas Graf339bf982006-11-10 14:10:15 -08002775	/* error messages get the original request appended */
2776 if (err)
2777 payload += nlmsg_len(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778
Patrick McHardyf9c22882013-04-17 06:47:04 +00002779 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2780 NETLINK_CB(in_skb).portid, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781 if (!skb) {
2782 struct sock *sk;
2783
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002784 sk = netlink_lookup(sock_net(in_skb->sk),
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002785 in_skb->sk->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002786 NETLINK_CB(in_skb).portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 if (sk) {
2788 sk->sk_err = ENOBUFS;
2789 sk->sk_error_report(sk);
2790 sock_put(sk);
2791 }
2792 return;
2793 }
2794
Eric W. Biederman15e47302012-09-07 20:12:54 +00002795 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
John Fastabend5dba93a2009-09-25 13:11:44 +00002796 NLMSG_ERROR, payload, 0);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002797 errmsg = nlmsg_data(rep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 errmsg->error = err;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002799 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
Eric W. Biederman15e47302012-09-07 20:12:54 +00002800 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002802EXPORT_SYMBOL(netlink_ack);
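
/*
 * Sketch of the receiving side (userspace, illustrative only): how the ACK
 * built above is typically consumed. NLMSG_ERROR with error == 0 is a
 * positive acknowledgement; a nonzero value is a negative errno, followed
 * by the echoed request header in err->msg.
 */
static int my_parse_ack(const struct nlmsghdr *nlh)
{
	const struct nlmsgerr *e;

	if (nlh->nlmsg_type != NLMSG_ERROR ||
	    nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*e)))
		return -EBADMSG;	/* not a well-formed ACK */
	e = NLMSG_DATA(nlh);
	return e->error;		/* 0 on success, -errno on failure */
}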
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002804int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002805 struct nlmsghdr *))
Thomas Graf82ace472005-11-10 02:25:53 +01002806{
Thomas Graf82ace472005-11-10 02:25:53 +01002807 struct nlmsghdr *nlh;
2808 int err;
2809
2810 while (skb->len >= nlmsg_total_size(0)) {
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002811 int msglen;
2812
Arnaldo Carvalho de Melob529ccf2007-04-25 19:08:35 -07002813 nlh = nlmsg_hdr(skb);
Thomas Grafd35b6852007-03-22 23:28:46 -07002814 err = 0;
Thomas Graf82ace472005-11-10 02:25:53 +01002815
Martin Murrayad8e4b72006-01-10 13:02:29 -08002816 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
Thomas Graf82ace472005-11-10 02:25:53 +01002817 return 0;
2818
Thomas Grafd35b6852007-03-22 23:28:46 -07002819 /* Only requests are handled by the kernel */
2820 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
Denis V. Lunev5c582982007-10-23 20:29:25 -07002821 goto ack;
Thomas Grafd35b6852007-03-22 23:28:46 -07002822
Thomas Graf45e7ae72007-03-22 23:29:10 -07002823 /* Skip control messages */
2824 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
Denis V. Lunev5c582982007-10-23 20:29:25 -07002825 goto ack;
Thomas Graf45e7ae72007-03-22 23:29:10 -07002826
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002827 err = cb(skb, nlh);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002828 if (err == -EINTR)
2829 goto skip;
2830
2831ack:
Thomas Grafd35b6852007-03-22 23:28:46 -07002832 if (nlh->nlmsg_flags & NLM_F_ACK || err)
Thomas Graf82ace472005-11-10 02:25:53 +01002833 netlink_ack(skb, nlh, err);
Thomas Graf82ace472005-11-10 02:25:53 +01002834
Denis V. Lunev5c582982007-10-23 20:29:25 -07002835skip:
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002836 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002837 if (msglen > skb->len)
2838 msglen = skb->len;
2839 skb_pull(skb, msglen);
Thomas Graf82ace472005-11-10 02:25:53 +01002840 }
2841
2842 return 0;
2843}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002844EXPORT_SYMBOL(netlink_rcv_skb);
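
/*
 * Sketch of an assumed subsystem: the usual shape of a kernel socket's
 * input callback, which defers header walking, request filtering and ACK
 * generation to netlink_rcv_skb(). It would be wired up as the .input
 * member of struct netlink_kernel_cfg passed to netlink_kernel_create();
 * my_rcv_msg is the illustrative per-message handler sketched earlier.
 */
static void my_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &my_rcv_msg);
}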
Thomas Graf82ace472005-11-10 02:25:53 +01002845
2846/**
Thomas Grafd387f6a2006-08-15 00:31:06 -07002847 * nlmsg_notify - send a notification netlink message
2848 * @sk: netlink socket to use
2849 * @skb: notification message
Eric W. Biederman15e47302012-09-07 20:12:54 +00002850 * @portid: destination netlink portid for reports or 0
Thomas Grafd387f6a2006-08-15 00:31:06 -07002851 * @group: destination multicast group or 0
2852 * @report: 1 to report back, 0 to disable
2853 * @flags: allocation flags
2854 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002855int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
Thomas Grafd387f6a2006-08-15 00:31:06 -07002856 unsigned int group, int report, gfp_t flags)
2857{
2858 int err = 0;
2859
2860 if (group) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002861 int exclude_portid = 0;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002862
2863 if (report) {
2864 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002865 exclude_portid = portid;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002866 }
2867
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002868		/* errors are reported via the destination sk->sk_err, but we
2869		 * propagate delivery errors if the NETLINK_BROADCAST_ERROR flag is set */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002870 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
Thomas Grafd387f6a2006-08-15 00:31:06 -07002871 }
2872
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002873 if (report) {
2874 int err2;
2875
Eric W. Biederman15e47302012-09-07 20:12:54 +00002876 err2 = nlmsg_unicast(sk, skb, portid);
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002877 if (!err || err == -ESRCH)
2878 err = err2;
2879 }
Thomas Grafd387f6a2006-08-15 00:31:06 -07002880
2881 return err;
2882}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002883EXPORT_SYMBOL(nlmsg_notify);
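
/*
 * Sketch of an assumed caller: the usual pattern around nlmsg_notify().
 * The caller builds the notification skb, then lets nlmsg_notify()
 * multicast it to a group and, when the request carried NLM_F_ECHO,
 * unicast a copy back to the requesting portid. my_sk, my_build_msg and
 * MY_GRP are illustrative assumptions.
 */
static int my_notify(u32 portid, unsigned int flags)
{
	struct sk_buff *skb;
	int report = flags & NLM_F_ECHO;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (my_build_msg(skb) < 0) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}
	return nlmsg_notify(my_sk, skb, portid, MY_GRP, report, GFP_KERNEL);
}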
Thomas Grafd387f6a2006-08-15 00:31:06 -07002884
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885#ifdef CONFIG_PROC_FS
2886struct nl_seq_iter {
Denis V. Luneve372c412007-11-19 22:31:54 -08002887 struct seq_net_private p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888 int link;
2889 int hash_idx;
2890};
2891
2892static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
2893{
2894 struct nl_seq_iter *iter = seq->private;
2895 int i, j;
Thomas Grafe3416942014-08-02 11:47:45 +02002896 struct netlink_sock *nlk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897 struct sock *s;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898 loff_t off = 0;
2899
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002900 for (i = 0; i < MAX_LINKS; i++) {
Thomas Grafe3416942014-08-02 11:47:45 +02002901 struct rhashtable *ht = &nl_table[i].hash;
Eric Dumazet67a24ac2014-08-05 07:50:07 +02002902 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903
Thomas Grafe3416942014-08-02 11:47:45 +02002904 for (j = 0; j < tbl->size; j++) {
Thomas Graf88d6ed12015-01-02 23:00:16 +01002905 struct rhash_head *node;
2906
2907 rht_for_each_entry_rcu(nlk, node, tbl, j, node) {
Thomas Grafe3416942014-08-02 11:47:45 +02002908 s = (struct sock *)nlk;
2909
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09002910 if (sock_net(s) != seq_file_net(seq))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002911 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912 if (off == pos) {
2913 iter->link = i;
2914 iter->hash_idx = j;
2915 return s;
2916 }
2917 ++off;
2918 }
2919 }
2920 }
2921 return NULL;
2922}
2923
2924static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
Thomas Graf21e49022015-01-02 23:00:22 +01002925 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926{
Thomas Grafe3416942014-08-02 11:47:45 +02002927 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2929}
2930
2931static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2932{
Thomas Graf78fd1d02014-10-21 22:05:38 +02002933 struct rhashtable *ht;
Thomas Graf88d6ed12015-01-02 23:00:16 +01002934 const struct bucket_table *tbl;
2935 struct rhash_head *node;
Thomas Grafe3416942014-08-02 11:47:45 +02002936 struct netlink_sock *nlk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937 struct nl_seq_iter *iter;
Gao fengda12c902013-06-06 14:49:11 +08002938 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002939 int i, j;
2940
2941 ++*pos;
2942
2943 if (v == SEQ_START_TOKEN)
2944 return netlink_seq_socket_idx(seq, 0);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002945
Gao fengda12c902013-06-06 14:49:11 +08002946 net = seq_file_net(seq);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002947 iter = seq->private;
Thomas Grafe3416942014-08-02 11:47:45 +02002948 nlk = v;
2949
Thomas Graf78fd1d02014-10-21 22:05:38 +02002950 i = iter->link;
2951 ht = &nl_table[i].hash;
Thomas Graf88d6ed12015-01-02 23:00:16 +01002952 tbl = rht_dereference_rcu(ht->tbl, ht);
2953 rht_for_each_entry_rcu_continue(nlk, node, nlk->node.next, tbl, iter->hash_idx, node)
Thomas Grafe3416942014-08-02 11:47:45 +02002954 if (net_eq(sock_net((struct sock *)nlk), net))
2955 return nlk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 j = iter->hash_idx + 1;
2958
2959 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960
Thomas Grafe3416942014-08-02 11:47:45 +02002961 for (; j < tbl->size; j++) {
Thomas Graf88d6ed12015-01-02 23:00:16 +01002962 rht_for_each_entry_rcu(nlk, node, tbl, j, node) {
Thomas Grafe3416942014-08-02 11:47:45 +02002963 if (net_eq(sock_net((struct sock *)nlk), net)) {
2964 iter->link = i;
2965 iter->hash_idx = j;
2966 return nlk;
2967 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968 }
2969 }
2970
2971 j = 0;
2972 } while (++i < MAX_LINKS);
2973
2974 return NULL;
2975}
2976
2977static void netlink_seq_stop(struct seq_file *seq, void *v)
Thomas Graf21e49022015-01-02 23:00:22 +01002978 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979{
Thomas Grafe3416942014-08-02 11:47:45 +02002980 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002981}
2982
2983
2984static int netlink_seq_show(struct seq_file *seq, void *v)
2985{
Eric Dumazet658cb352012-04-22 21:30:21 +00002986 if (v == SEQ_START_TOKEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 seq_puts(seq,
2988 "sk Eth Pid Groups "
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00002989 "Rmem Wmem Dump Locks Drops Inode\n");
Eric Dumazet658cb352012-04-22 21:30:21 +00002990 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991 struct sock *s = v;
2992 struct netlink_sock *nlk = nlk_sk(s);
2993
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002994 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995 s,
2996 s->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002997 nlk->portid,
Patrick McHardy513c2502005-09-06 15:43:59 -07002998 nlk->groups ? (u32)nlk->groups[0] : 0,
Eric Dumazet31e6d362009-06-17 19:05:41 -07002999 sk_rmem_alloc_get(s),
3000 sk_wmem_alloc_get(s),
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003001 nlk->cb_running,
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07003002 atomic_read(&s->sk_refcnt),
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003003 atomic_read(&s->sk_drops),
3004 sock_i_ino(s)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005 );
3006
3007 }
3008 return 0;
3009}
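
/*
 * Illustrative /proc/net/netlink output produced by the two branches above
 * (the data line is made up, not captured from a real system):
 *
 * sk       Eth Pid    Groups   Rmem     Wmem     Dump     Locks     Drops     Inode
 * ffff88003653a000 0   2407   00000440 0        0        0 2        0        11200
 */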
3010
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003011static const struct seq_operations netlink_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012 .start = netlink_seq_start,
3013 .next = netlink_seq_next,
3014 .stop = netlink_seq_stop,
3015 .show = netlink_seq_show,
3016};
3017
3018
3019static int netlink_seq_open(struct inode *inode, struct file *file)
3020{
Denis V. Luneve372c412007-11-19 22:31:54 -08003021 return seq_open_net(inode, file, &netlink_seq_ops,
3022 sizeof(struct nl_seq_iter));
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003023}
3024
Arjan van de Venda7071d2007-02-12 00:55:36 -08003025static const struct file_operations netlink_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026 .owner = THIS_MODULE,
3027 .open = netlink_seq_open,
3028 .read = seq_read,
3029 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003030 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031};
3032
3033#endif
3034
3035int netlink_register_notifier(struct notifier_block *nb)
3036{
Alan Sterne041c682006-03-27 01:16:30 -08003037 return atomic_notifier_chain_register(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003039EXPORT_SYMBOL(netlink_register_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040
3041int netlink_unregister_notifier(struct notifier_block *nb)
3042{
Alan Sterne041c682006-03-27 01:16:30 -08003043 return atomic_notifier_chain_unregister(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003045EXPORT_SYMBOL(netlink_unregister_notifier);
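
/*
 * Sketch of an assumed listener on the chain above. netlink_release()
 * (earlier in this file) fires NETLINK_URELEASE with a struct
 * netlink_notify describing the socket being released; my_nb and
 * my_netlink_event are illustrative assumptions.
 */
static int my_netlink_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_USERSOCK)
		pr_info("netlink portid %u released\n", n->portid);
	return NOTIFY_DONE;
}

static struct notifier_block my_nb = {
	.notifier_call	= my_netlink_event,
};
/* registered once with netlink_register_notifier(&my_nb) */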
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003046
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003047static const struct proto_ops netlink_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048 .family = PF_NETLINK,
3049 .owner = THIS_MODULE,
3050 .release = netlink_release,
3051 .bind = netlink_bind,
3052 .connect = netlink_connect,
3053 .socketpair = sock_no_socketpair,
3054 .accept = sock_no_accept,
3055 .getname = netlink_getname,
Patrick McHardy9652e932013-04-17 06:47:02 +00003056 .poll = netlink_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057 .ioctl = sock_no_ioctl,
3058 .listen = sock_no_listen,
3059 .shutdown = sock_no_shutdown,
Patrick McHardy9a4595b2005-08-15 12:32:15 -07003060 .setsockopt = netlink_setsockopt,
3061 .getsockopt = netlink_getsockopt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062 .sendmsg = netlink_sendmsg,
3063 .recvmsg = netlink_recvmsg,
Patrick McHardyccdfcc32013-04-17 06:47:01 +00003064 .mmap = netlink_mmap,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065 .sendpage = sock_no_sendpage,
3066};
3067
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003068static const struct net_proto_family netlink_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069 .family = PF_NETLINK,
3070 .create = netlink_create,
3071 .owner = THIS_MODULE, /* for consistency 8) */
3072};
3073
Pavel Emelyanov46650792007-10-08 20:38:39 -07003074static int __net_init netlink_net_init(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003075{
3076#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00003077 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003078 return -ENOMEM;
3079#endif
3080 return 0;
3081}
3082
Pavel Emelyanov46650792007-10-08 20:38:39 -07003083static void __net_exit netlink_net_exit(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003084{
3085#ifdef CONFIG_PROC_FS
Gao fengece31ff2013-02-18 01:34:56 +00003086 remove_proc_entry("netlink", net->proc_net);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003087#endif
3088}
3089
David S. Millerb963ea82010-08-30 19:08:01 -07003090static void __init netlink_add_usersock_entry(void)
3091{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003092 struct listeners *listeners;
David S. Millerb963ea82010-08-30 19:08:01 -07003093 int groups = 32;
3094
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003095 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
David S. Millerb963ea82010-08-30 19:08:01 -07003096 if (!listeners)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003097 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
David S. Millerb963ea82010-08-30 19:08:01 -07003098
3099 netlink_table_grab();
3100
3101 nl_table[NETLINK_USERSOCK].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003102 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
David S. Millerb963ea82010-08-30 19:08:01 -07003103 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3104 nl_table[NETLINK_USERSOCK].registered = 1;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00003105 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
David S. Millerb963ea82010-08-30 19:08:01 -07003106
3107 netlink_table_ungrab();
3108}
3109
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003110static struct pernet_operations __net_initdata netlink_net_ops = {
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003111 .init = netlink_net_init,
3112 .exit = netlink_net_exit,
3113};
3114
Linus Torvalds1da177e2005-04-16 15:20:36 -07003115static int __init netlink_proto_init(void)
3116{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003118 int err = proto_register(&netlink_proto, 0);
Thomas Grafe3416942014-08-02 11:47:45 +02003119 struct rhashtable_params ht_params = {
3120 .head_offset = offsetof(struct netlink_sock, node),
3121 .key_offset = offsetof(struct netlink_sock, portid),
3122 .key_len = sizeof(u32), /* portid */
Daniel Borkmann7f19fc52014-12-10 16:33:10 +01003123 .hashfn = jhash,
Thomas Grafe3416942014-08-02 11:47:45 +02003124 .max_shift = 16, /* 64K */
3125 .grow_decision = rht_grow_above_75,
3126 .shrink_decision = rht_shrink_below_30,
Thomas Grafe3416942014-08-02 11:47:45 +02003127 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128
3129 if (err != 0)
3130 goto out;
3131
YOSHIFUJI Hideaki / 吉藤英明fab25742013-01-09 07:19:48 +00003132 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133
Panagiotis Issaris0da974f2006-07-21 14:51:30 -07003134 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003135 if (!nl_table)
3136 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137
Linus Torvalds1da177e2005-04-16 15:20:36 -07003138 for (i = 0; i < MAX_LINKS; i++) {
Thomas Grafe3416942014-08-02 11:47:45 +02003139 if (rhashtable_init(&nl_table[i].hash, &ht_params) < 0) {
3140			while (--i >= 0)
3141 rhashtable_destroy(&nl_table[i].hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 kfree(nl_table);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003143 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145 }
3146
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02003147 INIT_LIST_HEAD(&netlink_tap_all);
3148
David S. Millerb963ea82010-08-30 19:08:01 -07003149 netlink_add_usersock_entry();
3150
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151 sock_register(&netlink_family_ops);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003152 register_pernet_subsys(&netlink_net_ops);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003153 /* The netlink device handler may be needed early. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154 rtnetlink_init();
3155out:
3156 return err;
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003157panic:
3158 panic("netlink_init: Cannot allocate nl_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159}
3160
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161core_initcall(netlink_proto_init);