/*
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *                              Patrick McHardy <kaber@trash.net>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
        struct rcu_head         rcu;
        unsigned long           masks[0];
};

/* state bits */
#define NETLINK_S_CONGESTED             0x0

/* flags */
#define NETLINK_F_KERNEL_SOCKET         0x1
#define NETLINK_F_RECV_PKTINFO          0x2
#define NETLINK_F_BROADCAST_SEND_ERROR  0x4
#define NETLINK_F_RECV_NO_ENOBUFS       0x8

static inline int netlink_is_kernel(struct sock *sk)
{
        return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}

struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per-bucket locks while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

static inline u32 netlink_group_mask(u32 group)
{
        return group ? 1 << (group - 1) : 0;
}

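/*
 * Netlink taps mirror netlink traffic to a net_device of type
 * ARPHRD_NETLINK (e.g. the nlmon driver) so that packet-capture tools
 * can observe it.  A rough usage sketch, assuming a module that owns
 * such a device "dev" (error handling abbreviated):
 *
 *      static struct netlink_tap nt;
 *
 *      nt.dev    = dev;
 *      nt.module = THIS_MODULE;
 *      err = netlink_add_tap(&nt);
 *      ...
 *      netlink_remove_tap(&nt);
 */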
int netlink_add_tap(struct netlink_tap *nt)
{
        if (unlikely(nt->dev->type != ARPHRD_NETLINK))
                return -EINVAL;

        spin_lock(&netlink_tap_lock);
        list_add_rcu(&nt->list, &netlink_tap_all);
        spin_unlock(&netlink_tap_lock);

        __module_get(nt->module);

        return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
        bool found = false;
        struct netlink_tap *tmp;

        spin_lock(&netlink_tap_lock);

        list_for_each_entry(tmp, &netlink_tap_all, list) {
                if (nt == tmp) {
                        list_del_rcu(&nt->list);
                        found = true;
                        goto out;
                }
        }

        pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
        spin_unlock(&netlink_tap_lock);

        if (found && nt->module)
                module_put(nt->module);

        return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
        int ret;

        ret = __netlink_remove_tap(nt);
        synchronize_net();

        return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

static bool netlink_filter_tap(const struct sk_buff *skb)
{
        struct sock *sk = skb->sk;

        /* We take the more conservative approach and
         * whitelist socket protocols that may pass.
         */
        switch (sk->sk_protocol) {
        case NETLINK_ROUTE:
        case NETLINK_USERSOCK:
        case NETLINK_SOCK_DIAG:
        case NETLINK_NFLOG:
        case NETLINK_XFRM:
        case NETLINK_FIB_LOOKUP:
        case NETLINK_NETFILTER:
        case NETLINK_GENERIC:
                return true;
        }

        return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
                                     struct net_device *dev)
{
        struct sk_buff *nskb;
        struct sock *sk = skb->sk;
        int ret = -ENOMEM;

        dev_hold(dev);
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (nskb) {
                nskb->dev = dev;
                nskb->protocol = htons((u16) sk->sk_protocol);
                nskb->pkt_type = netlink_is_kernel(sk) ?
                                 PACKET_KERNEL : PACKET_USER;
                skb_reset_network_header(nskb);
                ret = dev_queue_xmit(nskb);
                if (unlikely(ret > 0))
                        ret = net_xmit_errno(ret);
        }

        dev_put(dev);
        return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
        int ret;
        struct netlink_tap *tmp;

        if (!netlink_filter_tap(skb))
                return;

        list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
                ret = __netlink_deliver_tap_skb(skb, tmp->dev);
                if (unlikely(ret))
                        break;
        }
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
        rcu_read_lock();

        if (unlikely(!list_empty(&netlink_tap_all)))
                __netlink_deliver_tap(skb);

        rcu_read_unlock();
}

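/* Tap the skb unless both the source and the destination are kernel
 * sockets; purely kernel-internal exchanges are not mirrored here.
 */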
static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
                                       struct sk_buff *skb)
{
        if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
                netlink_deliver_tap(skb);
}

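/* Receive-queue overrun.  Unless the socket opted out of overrun
 * reporting (NETLINK_F_RECV_NO_ENOBUFS), raise ENOBUFS once per
 * congestion episode; NETLINK_S_CONGESTED is cleared again in
 * netlink_rcv_wake() once the receive queue has been drained.
 */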
static void netlink_overrun(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
                if (!test_and_set_bit(NETLINK_S_CONGESTED,
                                      &nlk_sk(sk)->state)) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
                }
        }
        atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (skb_queue_empty(&sk->sk_receive_queue))
                clear_bit(NETLINK_S_CONGESTED, &nlk->state);
        if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
                wake_up_interruptible(&nlk->wait);
}

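/*
 * Optional memory-mapped I/O: an RX and a TX ring of fixed-size frames
 * are shared with userspace, in the style of AF_PACKET's rings.  Each
 * frame starts with a struct nl_mmap_hdr whose nm_status field hands
 * ownership back and forth between kernel and userspace.
 */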
#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
        return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
        return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
        return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        else
                return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
        unsigned int i;

        for (i = 0; i < len; i++) {
                if (pg_vec[i] != NULL) {
                        if (is_vmalloc_addr(pg_vec[i]))
                                vfree(pg_vec[i]);
                        else
                                free_pages((unsigned long)pg_vec[i], order);
                }
        }
        kfree(pg_vec);
}

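/* Try to get cheap contiguous pages first, fall back to vmalloc, and
 * as a last resort retry the page allocator without __GFP_NORETRY.
 */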
static void *alloc_one_pg_vec_page(unsigned long order)
{
        void *buffer;
        gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
                          __GFP_NOWARN | __GFP_NORETRY;

        buffer = (void *)__get_free_pages(gfp_flags, order);
        if (buffer != NULL)
                return buffer;

        buffer = vzalloc((1 << order) * PAGE_SIZE);
        if (buffer != NULL)
                return buffer;

        gfp_flags &= ~__GFP_NORETRY;
        return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
                           struct nl_mmap_req *req, unsigned int order)
{
        unsigned int block_nr = req->nm_block_nr;
        unsigned int i;
        void **pg_vec;

        pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
        if (pg_vec == NULL)
                return NULL;

        for (i = 0; i < block_nr; i++) {
                pg_vec[i] = alloc_one_pg_vec_page(order);
                if (pg_vec[i] == NULL)
                        goto err1;
        }

        return pg_vec;
err1:
        free_pg_vec(pg_vec, order, block_nr);
        return NULL;
}

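/* Set up or (with closing == true) tear down one ring.  A ring may only
 * be reconfigured while it is neither mapped into userspace nor has
 * frames in flight; netlink_sock_destruct() uses the closing path to
 * release both rings when the socket goes away.
 */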
static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
                            bool closing, bool tx_ring)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        struct sk_buff_head *queue;
        void **pg_vec = NULL;
        unsigned int order = 0;
        int err;

        ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
        queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

        if (!closing) {
                if (atomic_read(&nlk->mapped))
                        return -EBUSY;
                if (atomic_read(&ring->pending))
                        return -EBUSY;
        }

        if (req->nm_block_nr) {
                if (ring->pg_vec != NULL)
                        return -EBUSY;

                if ((int)req->nm_block_size <= 0)
                        return -EINVAL;
                if (!PAGE_ALIGNED(req->nm_block_size))
                        return -EINVAL;
                if (req->nm_frame_size < NL_MMAP_HDRLEN)
                        return -EINVAL;
                if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
                        return -EINVAL;

                ring->frames_per_block = req->nm_block_size /
                                         req->nm_frame_size;
                if (ring->frames_per_block == 0)
                        return -EINVAL;
                if (ring->frames_per_block * req->nm_block_nr !=
                    req->nm_frame_nr)
                        return -EINVAL;

                order = get_order(req->nm_block_size);
                pg_vec = alloc_pg_vec(nlk, req, order);
                if (pg_vec == NULL)
                        return -ENOMEM;
        } else {
                if (req->nm_frame_nr)
                        return -EINVAL;
        }

        err = -EBUSY;
        mutex_lock(&nlk->pg_vec_lock);
        if (closing || atomic_read(&nlk->mapped) == 0) {
                err = 0;
                spin_lock_bh(&queue->lock);

                ring->frame_max         = req->nm_frame_nr - 1;
                ring->head              = 0;
                ring->frame_size        = req->nm_frame_size;
                ring->pg_vec_pages      = req->nm_block_size / PAGE_SIZE;

                swap(ring->pg_vec_len, req->nm_block_nr);
                swap(ring->pg_vec_order, order);
                swap(ring->pg_vec, pg_vec);

                __skb_queue_purge(queue);
                spin_unlock_bh(&queue->lock);

                WARN_ON(atomic_read(&nlk->mapped));
        }
        mutex_unlock(&nlk->pg_vec_lock);

        if (pg_vec)
                free_pg_vec(pg_vec, order, req->nm_block_nr);
        return err;
}

static void netlink_mm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct socket *sock = file->private_data;
        struct sock *sk = sock->sk;

        if (sk)
                atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct socket *sock = file->private_data;
        struct sock *sk = sock->sk;

        if (sk)
                atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
        .open   = netlink_mm_open,
        .close  = netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
                        struct vm_area_struct *vma)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        unsigned long start, size, expected;
        unsigned int i;
        int err = -EINVAL;

        if (vma->vm_pgoff)
                return -EINVAL;

        mutex_lock(&nlk->pg_vec_lock);

        expected = 0;
        for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
                if (ring->pg_vec == NULL)
                        continue;
                expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
        }

        if (expected == 0)
                goto out;

        size = vma->vm_end - vma->vm_start;
        if (size != expected)
                goto out;

        start = vma->vm_start;
        for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
                if (ring->pg_vec == NULL)
                        continue;

                for (i = 0; i < ring->pg_vec_len; i++) {
                        struct page *page;
                        void *kaddr = ring->pg_vec[i];
                        unsigned int pg_num;

                        for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
                                page = pgvec_to_page(kaddr);
                                err = vm_insert_page(vma, start, page);
                                if (err < 0)
                                        goto out;
                                start += PAGE_SIZE;
                                kaddr += PAGE_SIZE;
                        }
                }
        }

        atomic_inc(&nlk->mapped);
        vma->vm_ops = &netlink_mmap_ops;
        err = 0;
out:
        mutex_unlock(&nlk->pg_vec_lock);
        return err;
}

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        struct page *p_start, *p_end;

        /* First page is flushed through netlink_{get,set}_status */
        p_start = pgvec_to_page(hdr + PAGE_SIZE);
        p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
        while (p_start <= p_end) {
                flush_dcache_page(p_start);
                p_start++;
        }
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
        smp_rmb();
        flush_dcache_page(pgvec_to_page(hdr));
        return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
                               enum nl_mmap_status status)
{
        smp_mb();
        hdr->nm_status = status;
        flush_dcache_page(pgvec_to_page(hdr));
}

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
        unsigned int pg_vec_pos, frame_off;

        pg_vec_pos = pos / ring->frames_per_block;
        frame_off  = pos % ring->frames_per_block;

        return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
                     enum nl_mmap_status status)
{
        struct nl_mmap_hdr *hdr;

        hdr = __netlink_lookup_frame(ring, pos);
        if (netlink_get_status(hdr) != status)
                return NULL;

        return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
                      enum nl_mmap_status status)
{
        return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
                       enum nl_mmap_status status)
{
        unsigned int prev;

        prev = ring->head ? ring->head - 1 : ring->frame_max;
        return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
        ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
        unsigned int head = ring->head, pos = head;
        const struct nl_mmap_hdr *hdr;

        do {
                hdr = __netlink_lookup_frame(ring, pos);
                if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
                        break;
                if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
                        break;
                netlink_increment_head(ring);
        } while (ring->head != head);
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
        struct netlink_ring *ring = &nlk->rx_ring;
        struct nl_mmap_hdr *hdr;
        unsigned int n;

        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL)
                return false;

        n = ring->head + ring->frame_max / 2;
        if (n > ring->frame_max)
                n -= ring->frame_max;

        hdr = __netlink_lookup_frame(ring, n);

        return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}

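/* For memory-mapped sockets, poll() does double duty: it advances any
 * pending dump (see the flow-control comment below) and derives
 * POLLIN/POLLOUT from the state of the RX/TX rings in addition to the
 * usual datagram_poll() checks.
 */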
static unsigned int netlink_poll(struct file *file, struct socket *sock,
                                 poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        unsigned int mask;
        int err;

        if (nlk->rx_ring.pg_vec != NULL) {
                /* Memory mapped sockets don't call recvmsg(), so flow control
                 * for dumps is performed here. A dump is allowed to continue
                 * if at least half the ring is unused.
                 */
                while (nlk->cb_running && netlink_dump_space(nlk)) {
                        err = netlink_dump(sk);
                        if (err < 0) {
                                sk->sk_err = -err;
                                sk->sk_error_report(sk);
                                break;
                        }
                }
                netlink_rcv_wake(sk);
        }

        mask = datagram_poll(file, sock, wait);

        spin_lock_bh(&sk->sk_receive_queue.lock);
        if (nlk->rx_ring.pg_vec) {
                netlink_forward_ring(&nlk->rx_ring);
                if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
                        mask |= POLLIN | POLLRDNORM;
        }
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        spin_lock_bh(&sk->sk_write_queue.lock);
        if (nlk->tx_ring.pg_vec) {
                if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
                        mask |= POLLOUT | POLLWRNORM;
        }
        spin_unlock_bh(&sk->sk_write_queue.lock);

        return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
        return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
                                   struct netlink_ring *ring,
                                   struct nl_mmap_hdr *hdr)
{
        unsigned int size;
        void *data;

        size = ring->frame_size - NL_MMAP_HDRLEN;
        data = (void *)hdr + NL_MMAP_HDRLEN;

        skb->head       = data;
        skb->data       = data;
        skb_reset_tail_pointer(skb);
        skb->end        = skb->tail + size;
        skb->len        = 0;

        skb->destructor = netlink_skb_destructor;
        NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
        NETLINK_CB(skb).sk = sk;
}

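/* Transmit path for memory-mapped sockets: walk the TX ring, copy each
 * frame marked VALID into a freshly allocated skb, hand the frame back
 * to userspace as UNUSED, and deliver the skb by unicast (plus a
 * broadcast pass when a destination group is given).  Returns the
 * number of bytes sent, like a regular sendmsg().
 */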
static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
                                u32 dst_portid, u32 dst_group,
                                struct scm_cookie *scm)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        struct nl_mmap_hdr *hdr;
        struct sk_buff *skb;
        unsigned int maxlen;
        int err = 0, len = 0;

        mutex_lock(&nlk->pg_vec_lock);

        ring   = &nlk->tx_ring;
        maxlen = ring->frame_size - NL_MMAP_HDRLEN;

        do {
                unsigned int nm_len;

                hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
                if (hdr == NULL) {
                        if (!(msg->msg_flags & MSG_DONTWAIT) &&
                            atomic_read(&nlk->tx_ring.pending))
                                schedule();
                        continue;
                }

                nm_len = ACCESS_ONCE(hdr->nm_len);
                if (nm_len > maxlen) {
                        err = -EINVAL;
                        goto out;
                }

                netlink_frame_flush_dcache(hdr, nm_len);

                skb = alloc_skb(nm_len, GFP_KERNEL);
                if (skb == NULL) {
                        err = -ENOBUFS;
                        goto out;
                }
                __skb_put(skb, nm_len);
                memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
                netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

                netlink_increment_head(ring);

                NETLINK_CB(skb).portid    = nlk->portid;
                NETLINK_CB(skb).dst_group = dst_group;
                NETLINK_CB(skb).creds     = scm->creds;

                err = security_netlink_send(sk, skb);
                if (err) {
                        kfree_skb(skb);
                        goto out;
                }

                if (unlikely(dst_group)) {
                        atomic_inc(&skb->users);
                        netlink_broadcast(sk, skb, dst_portid, dst_group,
                                          GFP_KERNEL);
                }
                err = netlink_unicast(sk, skb, dst_portid,
                                      msg->msg_flags & MSG_DONTWAIT);
                if (err < 0)
                        goto out;
                len += err;

        } while (hdr != NULL ||
                 (!(msg->msg_flags & MSG_DONTWAIT) &&
                  atomic_read(&nlk->tx_ring.pending)));

        if (len > 0)
                err = len;
out:
        mutex_unlock(&nlk->pg_vec_lock);
        return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
        struct nl_mmap_hdr *hdr;

        hdr = netlink_mmap_hdr(skb);
        hdr->nm_len     = skb->len;
        hdr->nm_group   = NETLINK_CB(skb).dst_group;
        hdr->nm_pid     = NETLINK_CB(skb).creds.pid;
        hdr->nm_uid     = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
        hdr->nm_gid     = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
        netlink_frame_flush_dcache(hdr, hdr->nm_len);
        netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

        NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
        kfree_skb(skb);
}

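/* The skb could not be placed into the RX ring, so queue it on the
 * regular receive queue and mark the current frame NL_MMAP_STATUS_COPY
 * to tell userspace to fetch this particular message via recvmsg().
 */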
static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring = &nlk->rx_ring;
        struct nl_mmap_hdr *hdr;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL) {
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                kfree_skb(skb);
                netlink_overrun(sk);
                return;
        }
        netlink_increment_head(ring);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        hdr->nm_len     = skb->len;
        hdr->nm_group   = NETLINK_CB(skb).dst_group;
        hdr->nm_pid     = NETLINK_CB(skb).creds.pid;
        hdr->nm_uid     = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
        hdr->nm_gid     = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
        netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)      false
#define netlink_rx_is_mmaped(sk)        false
#define netlink_tx_is_mmaped(sk)        false
#define netlink_mmap                    sock_no_mmap
#define netlink_poll                    datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)      0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
        struct nl_mmap_hdr *hdr;
        struct netlink_ring *ring;
        struct sock *sk;

        /* If a packet from the kernel to userspace was freed because of an
         * error without being delivered to userspace, the kernel must reset
         * the status. In the direction userspace to kernel, the status is
         * always reset here after the packet was processed and freed.
         */
        if (netlink_skb_is_mmaped(skb)) {
                hdr = netlink_mmap_hdr(skb);
                sk = NETLINK_CB(skb).sk;

                if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
                        netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
                        ring = &nlk_sk(sk)->tx_ring;
                } else {
                        if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
                                hdr->nm_len = 0;
                                netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
                        }
                        ring = &nlk_sk(sk)->rx_ring;
                }

                WARN_ON(atomic_read(&ring->pending) == 0);
                atomic_dec(&ring->pending);
                sock_put(sk);

                skb->head = NULL;
        }
#endif
        if (is_vmalloc_addr(skb->head)) {
                if (!skb->cloned ||
                    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
                        vfree(skb->head);

                skb->head = NULL;
        }
        if (skb->sk != NULL)
                sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
        WARN_ON(skb->sk != NULL);
        skb->sk = sk;
        skb->destructor = netlink_skb_destructor;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
        sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->cb_running) {
                if (nlk->cb.done)
                        nlk->cb.done(&nlk->cb);

                module_put(nlk->cb.module);
                kfree_skb(nlk->cb.skb);
        }

        skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
        if (1) {
                struct nl_mmap_req req;

                memset(&req, 0, sizeof(req));
                if (nlk->rx_ring.pg_vec)
                        netlink_set_ring(sk, &req, true, false);
                memset(&req, 0, sizeof(req));
                if (nlk->tx_ring.pg_vec)
                        netlink_set_ring(sk, &req, true, true);
        }
#endif /* CONFIG_NETLINK_MMAP */

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
                return;
        }

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look: when several writers sleep and a reader wakes them up, all but
 * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
 * solves this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
        __acquires(nl_table_lock)
{
        might_sleep();

        write_lock_irq(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                                break;
                        write_unlock_irq(&nl_table_lock);
                        schedule();
                        write_lock_irq(&nl_table_lock);
                }

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);
        }
}

void netlink_table_ungrab(void)
        __releases(nl_table_lock)
{
        write_unlock_irq(&nl_table_lock);
        wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
        /* read_lock() synchronizes us to netlink_table_grab */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
}

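/* Lookup key for the rhashtable-based portid hash: a (net, portid)
 * pair, compared without the trailing padding that sizeof() would
 * include on 64-bit.
 */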
struct netlink_compare_arg
{
        possible_net_t pnet;
        u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
        (offsetof(struct netlink_compare_arg, portid) + sizeof(u32))

static inline int netlink_compare(struct rhashtable_compare_arg *arg,
                                  const void *ptr)
{
        const struct netlink_compare_arg *x = arg->key;
        const struct netlink_sock *nlk = ptr;

        return nlk->portid != x->portid ||
               !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
                                     struct net *net, u32 portid)
{
        memset(arg, 0, sizeof(*arg));
        write_pnet(&arg->pnet, net);
        arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
                                     struct net *net)
{
        struct netlink_compare_arg arg;

        netlink_compare_arg_init(&arg, net, portid);
        return rhashtable_lookup_fast(&table->hash, &arg,
                                      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
        struct netlink_compare_arg arg;

        netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
        return rhashtable_lookup_insert_key(&table->hash, &arg,
                                            &nlk_sk(sk)->node,
                                            netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
        struct netlink_table *table = &nl_table[protocol];
        struct sock *sk;

        rcu_read_lock();
        sk = __netlink_lookup(table, portid, net);
        if (sk)
                sock_hold(sk);
        rcu_read_unlock();

        return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
        struct netlink_table *tbl = &nl_table[sk->sk_protocol];
        unsigned long mask;
        unsigned int i;
        struct listeners *listeners;

        listeners = nl_deref_protected(tbl->listeners);
        if (!listeners)
                return;

        for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
                mask = 0;
                sk_for_each_bound(sk, &tbl->mc_list) {
                        if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
                                mask |= nlk_sk(sk)->groups[i];
                }
                listeners->masks[i] = mask;
        }
        /* this function is only called with the netlink table "grabbed", which
         * makes sure updates are visible before bind or setsockopt return. */
}

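/* Bind @sk to @portid.  Returns -EBUSY if the socket is already bound
 * and -EADDRINUSE if another socket in the same network namespace
 * already owns that portid.
 */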
static int netlink_insert(struct sock *sk, u32 portid)
{
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        int err;

        lock_sock(sk);

        err = -EBUSY;
        if (nlk_sk(sk)->portid)
                goto err;

        err = -ENOMEM;
        if (BITS_PER_LONG > 32 &&
            unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
                goto err;

        nlk_sk(sk)->portid = portid;
        sock_hold(sk);

        err = __netlink_insert(table, sk);
        if (err) {
                if (err == -EEXIST)
                        err = -EADDRINUSE;
                sock_put(sk);
        }

err:
        release_sock(sk);
        return err;
}

static void netlink_remove(struct sock *sk)
{
        struct netlink_table *table;

        table = &nl_table[sk->sk_protocol];
        if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
                                    netlink_rhashtable_params)) {
                WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }

        netlink_table_grab();
        if (nlk_sk(sk)->subscriptions) {
                __sk_del_bind_node(sk);
                netlink_update_listeners(sk);
        }
        if (sk->sk_protocol == NETLINK_GENERIC)
                atomic_inc(&genl_sk_destructing_cnt);
        netlink_table_ungrab();
}

static struct proto netlink_proto = {
        .name     = "NETLINK",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
                            struct mutex *cb_mutex, int protocol)
{
        struct sock *sk;
        struct netlink_sock *nlk;

        sock->ops = &netlink_ops;

        sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        nlk = nlk_sk(sk);
        if (cb_mutex) {
                nlk->cb_mutex = cb_mutex;
        } else {
                nlk->cb_mutex = &nlk->cb_def_mutex;
                mutex_init(nlk->cb_mutex);
        }
        init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
        mutex_init(&nlk->pg_vec_lock);
#endif

        sk->sk_destruct = netlink_sock_destruct;
        sk->sk_protocol = protocol;
        return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
                          int kern)
{
        struct module *module = NULL;
        struct mutex *cb_mutex;
        struct netlink_sock *nlk;
        int (*bind)(struct net *net, int group);
        void (*unbind)(struct net *net, int group);
        int err = 0;

        sock->state = SS_UNCONNECTED;

        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;

        netlink_lock_table();
#ifdef CONFIG_MODULES
        if (!nl_table[protocol].registered) {
                netlink_unlock_table();
                request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
                netlink_lock_table();
        }
#endif
        if (nl_table[protocol].registered &&
            try_module_get(nl_table[protocol].module))
                module = nl_table[protocol].module;
        else
                err = -EPROTONOSUPPORT;
        cb_mutex = nl_table[protocol].cb_mutex;
        bind = nl_table[protocol].bind;
        unbind = nl_table[protocol].unbind;
        netlink_unlock_table();

        if (err < 0)
                goto out;

        err = __netlink_create(net, sock, cb_mutex, protocol);
        if (err < 0)
                goto out_module;

        local_bh_disable();
        sock_prot_inuse_add(net, &netlink_proto, 1);
        local_bh_enable();

        nlk = nlk_sk(sock->sk);
        nlk->module = module;
        nlk->netlink_bind = bind;
        nlk->netlink_unbind = unbind;
out:
        return err;

out_module:
        module_put(module);
        goto out;
}

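/* The final sock_put() is deferred by one RCU grace period so that the
 * lockless lookups above never see a socket that is being freed.
 */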
static void deferred_put_nlk_sk(struct rcu_head *head)
{
        struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

        sock_put(&nlk->sk);
}

static int netlink_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk;

        if (!sk)
                return 0;

        netlink_remove(sk);
        sock_orphan(sk);
        nlk = nlk_sk(sk);

        /*
         * OK. Socket is unlinked, any packets that arrive now
         * will be purged.
         */

        /* must not acquire netlink_table_lock in any way again before unbind
         * and notifying genetlink is done as otherwise it might deadlock
         */
        if (nlk->netlink_unbind) {
                int i;

                for (i = 0; i < nlk->ngroups; i++)
                        if (test_bit(i, nlk->groups))
                                nlk->netlink_unbind(sock_net(sk), i + 1);
        }
        if (sk->sk_protocol == NETLINK_GENERIC &&
            atomic_dec_return(&genl_sk_destructing_cnt) == 0)
                wake_up(&genl_sk_destructing_waitq);

        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);

        skb_queue_purge(&sk->sk_write_queue);

        if (nlk->portid) {
                struct netlink_notify n = {
                        .net = sock_net(sk),
                        .protocol = sk->sk_protocol,
                        .portid = nlk->portid,
                };
                atomic_notifier_call_chain(&netlink_chain,
                                NETLINK_URELEASE, &n);
        }

        module_put(nlk->module);

        if (netlink_is_kernel(sk)) {
                netlink_table_grab();
                BUG_ON(nl_table[sk->sk_protocol].registered == 0);
                if (--nl_table[sk->sk_protocol].registered == 0) {
                        struct listeners *old;

                        old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
                        RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
                        kfree_rcu(old, rcu);
                        nl_table[sk->sk_protocol].module = NULL;
                        nl_table[sk->sk_protocol].bind = NULL;
                        nl_table[sk->sk_protocol].unbind = NULL;
                        nl_table[sk->sk_protocol].flags = 0;
                        nl_table[sk->sk_protocol].registered = 0;
                }
                netlink_table_ungrab();
        }

        kfree(nlk->groups);
        nlk->groups = NULL;

        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
        local_bh_enable();
        call_rcu(&nlk->rcu, deferred_put_nlk_sk);
        return 0;
}

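/* Choose a kernel-assigned portid: try the caller's tgid first, then
 * probe negative values starting at -4097 until a free one is found.
 */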
static int netlink_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        s32 portid = task_tgid_vnr(current);
        int err;
        static s32 rover = -4097;

retry:
        cond_resched();
        rcu_read_lock();
        if (__netlink_lookup(table, portid, net)) {
                /* Bind collision, search negative portid values. */
                portid = rover--;
                if (rover > -4097)
                        rover = -4097;
                rcu_read_unlock();
                goto retry;
        }
        rcu_read_unlock();

        err = netlink_insert(sk, portid);
        if (err == -EADDRINUSE)
                goto retry;

        /* If 2 threads race to autobind, that is fine. */
        if (err == -EBUSY)
                err = 0;

        return err;
}

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in the user namespace @user_ns when
 * the netlink socket was created, and that the sender of the
 * message has it as well.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
                          struct user_namespace *user_ns, int cap)
{
        return ((nsp->flags & NETLINK_SKB_DST) ||
                file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
               ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in the user namespace @user_ns when
 * the netlink socket was created, and that the sender of the
 * message has it as well.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
                        struct user_namespace *user_ns, int cap)
{
        return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in all user namespaces when the
 * netlink socket was created, and that the sender of the message
 * has it as well.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
        return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap over the network namespace of the
 * socket we received the message from when the netlink socket was
 * created, and that the sender of the message has it as well.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
        return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);

static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
        return (nl_table[sock->sk->sk_protocol].flags & flag) ||
                ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->subscriptions && !subscriptions)
                __sk_del_bind_node(sk);
        else if (!nlk->subscriptions && subscriptions)
                sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
        nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        unsigned int groups;
        unsigned long *new_groups;
        int err = 0;

        netlink_table_grab();

        groups = nl_table[sk->sk_protocol].groups;
        if (!nl_table[sk->sk_protocol].registered) {
                err = -ENOENT;
                goto out_unlock;
        }

        if (nlk->ngroups >= groups)
                goto out_unlock;

        new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
        if (new_groups == NULL) {
                err = -ENOMEM;
                goto out_unlock;
        }
        memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
               NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

        nlk->groups = new_groups;
        nlk->ngroups = groups;
 out_unlock:
        netlink_table_ungrab();
        return err;
}

Johannes Berg02c81ab2014-12-22 18:56:35 +01001444static void netlink_undo_bind(int group, unsigned long groups,
Johannes Berg023e2cf2014-12-23 21:00:06 +01001445 struct sock *sk)
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001446{
Johannes Berg023e2cf2014-12-23 21:00:06 +01001447 struct netlink_sock *nlk = nlk_sk(sk);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001448 int undo;
1449
1450 if (!nlk->netlink_unbind)
1451 return;
1452
1453 for (undo = 0; undo < group; undo++)
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09001454 if (test_bit(undo, &groups))
Pablo Neira8b7c36d2015-01-29 10:51:53 +01001455 nlk->netlink_unbind(sock_net(sk), undo + 1);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001456}
1457
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001458static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1459 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460{
1461 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001462 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 struct netlink_sock *nlk = nlk_sk(sk);
1464 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1465 int err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001466	unsigned long groups = nladdr->nl_groups;
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001467
Hannes Frederic Sowa4e4b5372012-12-15 15:42:19 +00001468 if (addr_len < sizeof(struct sockaddr_nl))
1469 return -EINVAL;
1470
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 if (nladdr->nl_family != AF_NETLINK)
1472 return -EINVAL;
1473
1474	/* Only superuser is allowed to listen to multicasts */
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001475 if (groups) {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001476 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy513c2502005-09-06 15:43:59 -07001477 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001478 err = netlink_realloc_groups(sk);
1479 if (err)
1480 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001481 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001483 if (nlk->portid)
Eric W. Biederman15e47302012-09-07 20:12:54 +00001484 if (nladdr->nl_pid != nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 return -EINVAL;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001486
1487 if (nlk->netlink_bind && groups) {
1488 int group;
1489
1490 for (group = 0; group < nlk->ngroups; group++) {
1491 if (!test_bit(group, &groups))
1492 continue;
Pablo Neira8b7c36d2015-01-29 10:51:53 +01001493 err = nlk->netlink_bind(net, group + 1);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001494 if (!err)
1495 continue;
Johannes Berg023e2cf2014-12-23 21:00:06 +01001496 netlink_undo_bind(group, groups, sk);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001497 return err;
1498 }
1499 }
1500
1501 if (!nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 err = nladdr->nl_pid ?
Herbert Xu8ea65f42015-01-26 14:02:56 +11001503 netlink_insert(sk, nladdr->nl_pid) :
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 netlink_autobind(sock);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001505 if (err) {
Johannes Berg023e2cf2014-12-23 21:00:06 +01001506 netlink_undo_bind(nlk->ngroups, groups, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 return err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001508 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 }
1510
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001511 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 return 0;
1513
1514 netlink_table_grab();
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001515 netlink_update_subscriptions(sk, nlk->subscriptions +
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001516 hweight32(groups) -
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001517 hweight32(nlk->groups[0]));
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001518 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08001519 netlink_update_listeners(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 netlink_table_ungrab();
1521
1522 return 0;
1523}
1524
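/*
 * Example (illustrative userspace sketch of the bind() path above,
 * assuming NETLINK_ROUTE and the RTMGRP_LINK mask from
 * <linux/rtnetlink.h>):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl snl = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,           // 0: kernel autobinds a portid
 *		.nl_groups = RTMGRP_LINK, // bitmask: first 32 groups only
 *	};
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 *
 * Groups beyond the first 32 cannot be expressed in nl_groups and are
 * joined with the NETLINK_ADD_MEMBERSHIP socket option instead.
 */
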
1525static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1526 int alen, int flags)
1527{
1528 int err = 0;
1529 struct sock *sk = sock->sk;
1530 struct netlink_sock *nlk = nlk_sk(sk);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001531 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532
Changli Gao6503d962010-03-31 22:58:26 +00001533 if (alen < sizeof(addr->sa_family))
1534 return -EINVAL;
1535
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 if (addr->sa_family == AF_UNSPEC) {
1537 sk->sk_state = NETLINK_UNCONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001538 nlk->dst_portid = 0;
Patrick McHardyd629b832005-08-14 19:27:50 -07001539 nlk->dst_group = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 return 0;
1541 }
1542 if (addr->sa_family != AF_NETLINK)
1543 return -EINVAL;
1544
Mike Pecovnik46833a82014-02-24 21:11:16 +01001545 if ((nladdr->nl_groups || nladdr->nl_pid) &&
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001546 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 return -EPERM;
1548
Eric W. Biederman15e47302012-09-07 20:12:54 +00001549 if (!nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 err = netlink_autobind(sock);
1551
1552 if (err == 0) {
1553 sk->sk_state = NETLINK_CONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001554 nlk->dst_portid = nladdr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001555 nlk->dst_group = ffs(nladdr->nl_groups);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 }
1557
1558 return err;
1559}
1560
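/*
 * Example (illustrative userspace sketch): connect() on a netlink
 * socket merely records a default destination (portid 0 below, i.e.
 * the kernel), after which plain send()/recv() work without a
 * destination address:
 *
 *	struct sockaddr_nl kaddr = { .nl_family = AF_NETLINK };
 *	connect(fd, (struct sockaddr *)&kaddr, sizeof(kaddr));
 */
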
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001561static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1562 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563{
1564 struct sock *sk = sock->sk;
1565 struct netlink_sock *nlk = nlk_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00001566 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001567
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 nladdr->nl_family = AF_NETLINK;
1569 nladdr->nl_pad = 0;
1570 *addr_len = sizeof(*nladdr);
1571
1572 if (peer) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001573 nladdr->nl_pid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001574 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001576 nladdr->nl_pid = nlk->portid;
Patrick McHardy513c2502005-09-06 15:43:59 -07001577 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 }
1579 return 0;
1580}
1581
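/*
 * Example (illustrative userspace sketch): after an autobind,
 * getsockname() is how callers learn which portid the kernel picked:
 *
 *	struct sockaddr_nl self;
 *	socklen_t alen = sizeof(self);
 *	getsockname(fd, (struct sockaddr *)&self, &alen);
 *	// self.nl_pid now holds this socket's portid
 */
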
Eric W. Biederman15e47302012-09-07 20:12:54 +00001582static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 struct sock *sock;
1585 struct netlink_sock *nlk;
1586
Eric W. Biederman15e47302012-09-07 20:12:54 +00001587 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 if (!sock)
1589 return ERR_PTR(-ECONNREFUSED);
1590
1591 /* Don't bother queuing skb if kernel socket has no input function */
1592 nlk = nlk_sk(sock);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001593 if (sock->sk_state == NETLINK_CONNECTED &&
Eric W. Biederman15e47302012-09-07 20:12:54 +00001594 nlk->dst_portid != nlk_sk(ssk)->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 sock_put(sock);
1596 return ERR_PTR(-ECONNREFUSED);
1597 }
1598 return sock;
1599}
1600
1601struct sock *netlink_getsockbyfilp(struct file *filp)
1602{
Al Viro496ad9a2013-01-23 17:07:38 -05001603 struct inode *inode = file_inode(filp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 struct sock *sock;
1605
1606 if (!S_ISSOCK(inode->i_mode))
1607 return ERR_PTR(-ENOTSOCK);
1608
1609 sock = SOCKET_I(inode)->sk;
1610 if (sock->sk_family != AF_NETLINK)
1611 return ERR_PTR(-EINVAL);
1612
1613 sock_hold(sock);
1614 return sock;
1615}
1616
Pablo Neira3a365152013-06-28 03:04:23 +02001617static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1618 int broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001619{
1620 struct sk_buff *skb;
1621 void *data;
1622
Pablo Neira3a365152013-06-28 03:04:23 +02001623 if (size <= NLMSG_GOODSIZE || broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001624 return alloc_skb(size, GFP_KERNEL);
1625
Pablo Neira3a365152013-06-28 03:04:23 +02001626 size = SKB_DATA_ALIGN(size) +
1627 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001628
1629 data = vmalloc(size);
1630 if (data == NULL)
Pablo Neira3a365152013-06-28 03:04:23 +02001631 return NULL;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001632
Eric Dumazet2ea2f622015-04-24 16:05:01 -07001633 skb = __build_skb(data, size);
Pablo Neira3a365152013-06-28 03:04:23 +02001634 if (skb == NULL)
1635 vfree(data);
Eric Dumazet2ea2f622015-04-24 16:05:01 -07001636 else
Pablo Neira3a365152013-06-28 03:04:23 +02001637 skb->destructor = netlink_skb_destructor;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001638
1639 return skb;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001640}
1641
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642/*
1643 * Attach a skb to a netlink socket.
1644 * The caller must hold a reference to the destination socket. On error, the
1645 * reference is dropped. The skb is not sent to the destination; only the
1646 * error checks are performed and memory in the queue is reserved.
1647 * Return values:
1648 * < 0: error. skb freed, reference to sock dropped.
1649 * 0: continue
1650 * 1: repeat lookup - reference dropped while waiting for socket memory.
1651 */
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001652int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001653 long *timeo, struct sock *ssk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654{
1655 struct netlink_sock *nlk;
1656
1657 nlk = nlk_sk(sk);
1658
Patrick McHardy5fd96122013-04-17 06:47:03 +00001659 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001660 test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
Patrick McHardy5fd96122013-04-17 06:47:03 +00001661 !netlink_skb_is_mmaped(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 DECLARE_WAITQUEUE(wait, current);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001663 if (!*timeo) {
Denis V. Lunevaed81562007-10-10 21:14:32 -07001664 if (!ssk || netlink_is_kernel(ssk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 netlink_overrun(sk);
1666 sock_put(sk);
1667 kfree_skb(skb);
1668 return -EAGAIN;
1669 }
1670
1671 __set_current_state(TASK_INTERRUPTIBLE);
1672 add_wait_queue(&nlk->wait, &wait);
1673
1674 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001675 test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 !sock_flag(sk, SOCK_DEAD))
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001677 *timeo = schedule_timeout(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
1679 __set_current_state(TASK_RUNNING);
1680 remove_wait_queue(&nlk->wait, &wait);
1681 sock_put(sk);
1682
1683 if (signal_pending(current)) {
1684 kfree_skb(skb);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001685 return sock_intr_errno(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 }
1687 return 1;
1688 }
Patrick McHardycf0a0182013-04-17 06:47:00 +00001689 netlink_skb_set_owner_r(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 return 0;
1691}
1692
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001693static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 int len = skb->len;
1696
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02001697 netlink_deliver_tap(skb);
1698
Patrick McHardyf9c22882013-04-17 06:47:04 +00001699#ifdef CONFIG_NETLINK_MMAP
1700 if (netlink_skb_is_mmaped(skb))
1701 netlink_queue_mmaped_skb(sk, skb);
1702 else if (netlink_rx_is_mmaped(sk))
1703 netlink_ring_set_copied(sk, skb);
1704 else
1705#endif /* CONFIG_NETLINK_MMAP */
1706 skb_queue_tail(&sk->sk_receive_queue, skb);
David S. Miller676d2362014-04-11 16:15:36 -04001707 sk->sk_data_ready(sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001708 return len;
1709}
1710
1711int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1712{
1713 int len = __netlink_sendskb(sk, skb);
1714
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 sock_put(sk);
1716 return len;
1717}
1718
1719void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1720{
1721 kfree_skb(skb);
1722 sock_put(sk);
1723}
1724
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001725static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726{
1727 int delta;
1728
Patrick McHardy1298ca42013-04-17 06:46:59 +00001729 WARN_ON(skb->sk != NULL);
Patrick McHardy5fd96122013-04-17 06:47:03 +00001730 if (netlink_skb_is_mmaped(skb))
1731 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001733 delta = skb->end - skb->tail;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001734 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 return skb;
1736
1737 if (skb_shared(skb)) {
1738 struct sk_buff *nskb = skb_clone(skb, allocation);
1739 if (!nskb)
1740 return skb;
Eric Dumazet8460c002012-04-19 02:24:28 +00001741 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 skb = nskb;
1743 }
1744
1745 if (!pskb_expand_head(skb, 0, -delta, allocation))
1746 skb->truesize -= delta;
1747
1748 return skb;
1749}
1750
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001751static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1752 struct sock *ssk)
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001753{
1754 int ret;
1755 struct netlink_sock *nlk = nlk_sk(sk);
1756
1757 ret = -ECONNREFUSED;
1758 if (nlk->netlink_rcv != NULL) {
1759 ret = skb->len;
Patrick McHardycf0a0182013-04-17 06:47:00 +00001760 netlink_skb_set_owner_r(skb, sk);
Patrick McHardye32123e2013-04-17 06:46:57 +00001761 NETLINK_CB(skb).sk = ssk;
Daniel Borkmann73bfd372013-12-23 14:35:55 +01001762 netlink_deliver_tap_kernel(sk, ssk, skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001763 nlk->netlink_rcv(skb);
Eric Dumazetbfb253c2012-04-22 21:30:29 +00001764 consume_skb(skb);
1765 } else {
1766 kfree_skb(skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001767 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001768 sock_put(sk);
1769 return ret;
1770}
1771
1772int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
Eric W. Biederman15e47302012-09-07 20:12:54 +00001773 u32 portid, int nonblock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774{
1775 struct sock *sk;
1776 int err;
1777 long timeo;
1778
1779 skb = netlink_trim(skb, gfp_any());
1780
1781 timeo = sock_sndtimeo(ssk, nonblock);
1782retry:
Eric W. Biederman15e47302012-09-07 20:12:54 +00001783 sk = netlink_getsockbyportid(ssk, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 if (IS_ERR(sk)) {
1785 kfree_skb(skb);
1786 return PTR_ERR(sk);
1787 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001788 if (netlink_is_kernel(sk))
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001789 return netlink_unicast_kernel(sk, skb, ssk);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001790
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001791 if (sk_filter(sk, skb)) {
Wang Chen84874602008-07-01 19:55:09 -07001792 err = skb->len;
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001793 kfree_skb(skb);
1794 sock_put(sk);
1795 return err;
1796 }
1797
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001798 err = netlink_attachskb(sk, skb, &timeo, ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 if (err == 1)
1800 goto retry;
1801 if (err)
1802 return err;
1803
Denis V. Lunev7ee015e2007-10-10 21:14:03 -07001804 return netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001806EXPORT_SYMBOL(netlink_unicast);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807
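/*
 * Example (illustrative sketch): a kernel subsystem replying to a
 * request from its input callback. hypothetical_kernel_sk and
 * build_reply() are made-up names; NETLINK_CB(skb).portid identifies
 * the requester:
 *
 *	static void hypothetical_input(struct sk_buff *skb)
 *	{
 *		struct sk_buff *reply = build_reply(skb);
 *
 *		if (reply)
 *			netlink_unicast(hypothetical_kernel_sk, reply,
 *					NETLINK_CB(skb).portid,
 *					MSG_DONTWAIT);
 *	}
 */
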
Patrick McHardyf9c22882013-04-17 06:47:04 +00001808struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1809 u32 dst_portid, gfp_t gfp_mask)
1810{
1811#ifdef CONFIG_NETLINK_MMAP
1812 struct sock *sk = NULL;
1813 struct sk_buff *skb;
1814 struct netlink_ring *ring;
1815 struct nl_mmap_hdr *hdr;
1816 unsigned int maxlen;
1817
1818 sk = netlink_getsockbyportid(ssk, dst_portid);
1819 if (IS_ERR(sk))
1820 goto out;
1821
1822 ring = &nlk_sk(sk)->rx_ring;
1823 /* fast-path without atomic ops for common case: non-mmaped receiver */
1824 if (ring->pg_vec == NULL)
1825 goto out_put;
1826
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001827 if (ring->frame_size - NL_MMAP_HDRLEN < size)
1828 goto out_put;
1829
Patrick McHardyf9c22882013-04-17 06:47:04 +00001830 skb = alloc_skb_head(gfp_mask);
1831 if (skb == NULL)
1832 goto err1;
1833
1834 spin_lock_bh(&sk->sk_receive_queue.lock);
1835 /* check again under lock */
1836 if (ring->pg_vec == NULL)
1837 goto out_free;
1838
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001839	/* check the size limit again under lock */
Patrick McHardyf9c22882013-04-17 06:47:04 +00001840 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1841 if (maxlen < size)
1842 goto out_free;
1843
1844 netlink_forward_ring(ring);
1845 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1846 if (hdr == NULL)
1847 goto err2;
1848 netlink_ring_setup_skb(skb, sk, ring, hdr);
1849 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1850 atomic_inc(&ring->pending);
1851 netlink_increment_head(ring);
1852
1853 spin_unlock_bh(&sk->sk_receive_queue.lock);
1854 return skb;
1855
1856err2:
1857 kfree_skb(skb);
1858 spin_unlock_bh(&sk->sk_receive_queue.lock);
Patrick McHardycd1df522013-04-17 06:47:05 +00001859 netlink_overrun(sk);
Patrick McHardyf9c22882013-04-17 06:47:04 +00001860err1:
1861 sock_put(sk);
1862 return NULL;
1863
1864out_free:
1865 kfree_skb(skb);
1866 spin_unlock_bh(&sk->sk_receive_queue.lock);
1867out_put:
1868 sock_put(sk);
1869out:
1870#endif
1871 return alloc_skb(size, gfp_mask);
1872}
1873EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1874
Patrick McHardy4277a082006-03-20 18:52:01 -08001875int netlink_has_listeners(struct sock *sk, unsigned int group)
1876{
1877 int res = 0;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001878 struct listeners *listeners;
Patrick McHardy4277a082006-03-20 18:52:01 -08001879
Denis V. Lunevaed81562007-10-10 21:14:32 -07001880 BUG_ON(!netlink_is_kernel(sk));
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001881
1882 rcu_read_lock();
1883 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1884
Eric Dumazet6d772ac2012-10-18 03:21:55 +00001885 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001886 res = test_bit(group - 1, listeners->masks);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001887
1888 rcu_read_unlock();
1889
Patrick McHardy4277a082006-03-20 18:52:01 -08001890 return res;
1891}
1892EXPORT_SYMBOL_GPL(netlink_has_listeners);
1893
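/*
 * Example (illustrative sketch): event producers use
 * netlink_has_listeners() to avoid building a notification nobody
 * would receive. hypothetical_kernel_sk, EXAMPLE_GRP and build_event()
 * are made-up names:
 *
 *	if (!netlink_has_listeners(hypothetical_kernel_sk, EXAMPLE_GRP))
 *		return;		// no subscribers, skip the allocation
 *	skb = build_event();
 *	if (skb)
 *		netlink_broadcast(hypothetical_kernel_sk, skb, 0,
 *				  EXAMPLE_GRP, GFP_KERNEL);
 */
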
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001894static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895{
1896 struct netlink_sock *nlk = nlk_sk(sk);
1897
1898 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001899 !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
Patrick McHardycf0a0182013-04-17 06:47:00 +00001900 netlink_skb_set_owner_r(skb, sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001901 __netlink_sendskb(sk, skb);
stephen hemminger2c6458002011-12-22 08:52:03 +00001902 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 }
1904 return -1;
1905}
1906
1907struct netlink_broadcast_data {
1908 struct sock *exclude_sk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001909 struct net *net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001910 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 u32 group;
1912 int failure;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08001913 int delivery_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 int congested;
1915 int delivered;
Al Viro7d877f32005-10-21 03:20:43 -04001916 gfp_t allocation;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 struct sk_buff *skb, *skb2;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001918 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1919 void *tx_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920};
1921
Rami Rosen46c95212014-07-01 21:17:35 +03001922static void do_one_broadcast(struct sock *sk,
1923 struct netlink_broadcast_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924{
1925 struct netlink_sock *nlk = nlk_sk(sk);
1926 int val;
1927
1928 if (p->exclude_sk == sk)
Rami Rosen46c95212014-07-01 21:17:35 +03001929 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
Eric W. Biederman15e47302012-09-07 20:12:54 +00001931 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001932 !test_bit(p->group - 1, nlk->groups))
Rami Rosen46c95212014-07-01 21:17:35 +03001933 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09001935 if (!net_eq(sock_net(sk), p->net))
Rami Rosen46c95212014-07-01 21:17:35 +03001936 return;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001937
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 if (p->failure) {
1939 netlink_overrun(sk);
Rami Rosen46c95212014-07-01 21:17:35 +03001940 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 }
1942
1943 sock_hold(sk);
1944 if (p->skb2 == NULL) {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001945 if (skb_shared(p->skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 p->skb2 = skb_clone(p->skb, p->allocation);
1947 } else {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001948 p->skb2 = skb_get(p->skb);
1949 /*
1950 * skb ownership may have been set when
1951 * delivered to a previous socket.
1952 */
1953 skb_orphan(p->skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 }
1955 }
1956 if (p->skb2 == NULL) {
1957 netlink_overrun(sk);
1958 /* Clone failed. Notify ALL listeners. */
1959 p->failure = 1;
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001960 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00001961 p->delivery_failure = 1;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001962 } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1963 kfree_skb(p->skb2);
1964 p->skb2 = NULL;
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001965 } else if (sk_filter(sk, p->skb2)) {
1966 kfree_skb(p->skb2);
1967 p->skb2 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1969 netlink_overrun(sk);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001970 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00001971 p->delivery_failure = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 } else {
1973 p->congested |= val;
1974 p->delivered = 1;
1975 p->skb2 = NULL;
1976 }
1977 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978}
1979
Eric W. Biederman15e47302012-09-07 20:12:54 +00001980int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001981 u32 group, gfp_t allocation,
1982 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1983 void *filter_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001985 struct net *net = sock_net(ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 struct netlink_broadcast_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 struct sock *sk;
1988
1989 skb = netlink_trim(skb, allocation);
1990
1991 info.exclude_sk = ssk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001992 info.net = net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001993 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 info.group = group;
1995 info.failure = 0;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08001996 info.delivery_failure = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 info.congested = 0;
1998 info.delivered = 0;
1999 info.allocation = allocation;
2000 info.skb = skb;
2001 info.skb2 = NULL;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002002 info.tx_filter = filter;
2003 info.tx_data = filter_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
2005	/* While we sleep in clone, do not allow the socket list to change */
2006
2007 netlink_lock_table();
2008
Sasha Levinb67bfe02013-02-27 17:06:00 -08002009 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 do_one_broadcast(sk, &info);
2011
Neil Horman70d4bf62010-07-20 06:45:56 +00002012 consume_skb(skb);
Tommy S. Christensenaa1c6a62005-05-19 13:07:32 -07002013
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 netlink_unlock_table();
2015
Neil Horman70d4bf62010-07-20 06:45:56 +00002016 if (info.delivery_failure) {
2017 kfree_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002018 return -ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002019 }
2020 consume_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002021
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 if (info.delivered) {
2023 if (info.congested && (allocation & __GFP_WAIT))
2024 yield();
2025 return 0;
2026 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 return -ESRCH;
2028}
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002029EXPORT_SYMBOL(netlink_broadcast_filtered);
2030
Eric W. Biederman15e47302012-09-07 20:12:54 +00002031int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002032 u32 group, gfp_t allocation)
2033{
Eric W. Biederman15e47302012-09-07 20:12:54 +00002034 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002035 NULL, NULL);
2036}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002037EXPORT_SYMBOL(netlink_broadcast);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038
2039struct netlink_set_err_data {
2040 struct sock *exclude_sk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002041 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 u32 group;
2043 int code;
2044};
2045
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00002046static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047{
2048 struct netlink_sock *nlk = nlk_sk(sk);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002049 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050
2051 if (sk == p->exclude_sk)
2052 goto out;
2053
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08002054 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002055 goto out;
2056
Eric W. Biederman15e47302012-09-07 20:12:54 +00002057 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07002058 !test_bit(p->group - 1, nlk->groups))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 goto out;
2060
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002061 if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002062 ret = 1;
2063 goto out;
2064 }
2065
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 sk->sk_err = p->code;
2067 sk->sk_error_report(sk);
2068out:
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002069 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070}
2071
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002072/**
2073 * netlink_set_err - report error to broadcast listeners
2074 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
Eric W. Biederman15e47302012-09-07 20:12:54 +00002075 * @portid: the PORTID of a process that we want to skip (if any)
Johannes Berg840e93f22013-11-19 10:35:40 +01002076 * @group: the broadcast group that will notice the error
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002077 * @code: error code, must be negative (as usual in kernelspace)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002078 *
2079 * This function returns the number of broadcast listeners that have set the
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002080 * NETLINK_NO_ENOBUFS socket option.
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002081 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002082int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083{
2084 struct netlink_set_err_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 struct sock *sk;
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002086 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
2088 info.exclude_sk = ssk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002089 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 info.group = group;
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002091 /* sk->sk_err wants a positive error value */
2092 info.code = -code;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093
2094 read_lock(&nl_table_lock);
2095
Sasha Levinb67bfe02013-02-27 17:06:00 -08002096 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002097 ret += do_one_set_err(sk, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
2099 read_unlock(&nl_table_lock);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002100 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101}
Pablo Neira Ayusodd5b6ce2009-03-23 13:21:06 +01002102EXPORT_SYMBOL(netlink_set_err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103
Johannes Berg84659eb2007-07-18 15:47:05 -07002104/* must be called with netlink table grabbed */
2105static void netlink_update_socket_mc(struct netlink_sock *nlk,
2106 unsigned int group,
2107 int is_new)
2108{
2109 int old, new = !!is_new, subscriptions;
2110
2111 old = test_bit(group - 1, nlk->groups);
2112 subscriptions = nlk->subscriptions - old + new;
2113 if (new)
2114 __set_bit(group - 1, nlk->groups);
2115 else
2116 __clear_bit(group - 1, nlk->groups);
2117 netlink_update_subscriptions(&nlk->sk, subscriptions);
2118 netlink_update_listeners(&nlk->sk);
2119}
2120
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002121static int netlink_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002122 char __user *optval, unsigned int optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002123{
2124 struct sock *sk = sock->sk;
2125 struct netlink_sock *nlk = nlk_sk(sk);
Johannes Bergeb496532007-07-18 02:07:51 -07002126 unsigned int val = 0;
2127 int err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002128
2129 if (level != SOL_NETLINK)
2130 return -ENOPROTOOPT;
2131
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002132 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2133 optlen >= sizeof(int) &&
Johannes Bergeb496532007-07-18 02:07:51 -07002134 get_user(val, (unsigned int __user *)optval))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002135 return -EFAULT;
2136
2137 switch (optname) {
2138 case NETLINK_PKTINFO:
2139 if (val)
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002140 nlk->flags |= NETLINK_F_RECV_PKTINFO;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002141 else
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002142 nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002143 err = 0;
2144 break;
2145 case NETLINK_ADD_MEMBERSHIP:
2146 case NETLINK_DROP_MEMBERSHIP: {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07002147 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002148 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002149 err = netlink_realloc_groups(sk);
2150 if (err)
2151 return err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002152 if (!val || val - 1 >= nlk->ngroups)
2153 return -EINVAL;
Richard Guy Briggs7774d5e2014-04-22 21:31:55 -04002154 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
Johannes Berg023e2cf2014-12-23 21:00:06 +01002155 err = nlk->netlink_bind(sock_net(sk), val);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04002156 if (err)
2157 return err;
2158 }
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002159 netlink_table_grab();
Johannes Berg84659eb2007-07-18 15:47:05 -07002160 netlink_update_socket_mc(nlk, val,
2161 optname == NETLINK_ADD_MEMBERSHIP);
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002162 netlink_table_ungrab();
Richard Guy Briggs7774d5e2014-04-22 21:31:55 -04002163 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
Johannes Berg023e2cf2014-12-23 21:00:06 +01002164 nlk->netlink_unbind(sock_net(sk), val);
Pablo Neira Ayuso03292742012-06-29 06:15:22 +00002165
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002166 err = 0;
2167 break;
2168 }
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002169 case NETLINK_BROADCAST_ERROR:
2170 if (val)
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002171 nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002172 else
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002173 nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002174 err = 0;
2175 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002176 case NETLINK_NO_ENOBUFS:
2177 if (val) {
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002178 nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
2179 clear_bit(NETLINK_S_CONGESTED, &nlk->state);
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002180 wake_up_interruptible(&nlk->wait);
Eric Dumazet658cb352012-04-22 21:30:21 +00002181 } else {
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002182 nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002183 }
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002184 err = 0;
2185 break;
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002186#ifdef CONFIG_NETLINK_MMAP
2187 case NETLINK_RX_RING:
2188 case NETLINK_TX_RING: {
2189 struct nl_mmap_req req;
2190
2191 /* Rings might consume more memory than queue limits, require
2192 * CAP_NET_ADMIN.
2193 */
2194 if (!capable(CAP_NET_ADMIN))
2195 return -EPERM;
2196 if (optlen < sizeof(req))
2197 return -EINVAL;
2198 if (copy_from_user(&req, optval, sizeof(req)))
2199 return -EFAULT;
2200 err = netlink_set_ring(sk, &req, false,
2201 optname == NETLINK_TX_RING);
2202 break;
2203 }
2204#endif /* CONFIG_NETLINK_MMAP */
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002205 default:
2206 err = -ENOPROTOOPT;
2207 }
2208 return err;
2209}
2210
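/*
 * Example (illustrative userspace sketch): NETLINK_ADD_MEMBERSHIP
 * takes a group number rather than a bitmask, so unlike
 * sockaddr_nl.nl_groups it also reaches groups above 32 (the value 40
 * is arbitrary):
 *
 *	unsigned int grp = 40;
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 *	// ... and symmetrically:
 *	setsockopt(fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 */
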
2211static int netlink_getsockopt(struct socket *sock, int level, int optname,
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002212 char __user *optval, int __user *optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002213{
2214 struct sock *sk = sock->sk;
2215 struct netlink_sock *nlk = nlk_sk(sk);
2216 int len, val, err;
2217
2218 if (level != SOL_NETLINK)
2219 return -ENOPROTOOPT;
2220
2221 if (get_user(len, optlen))
2222 return -EFAULT;
2223 if (len < 0)
2224 return -EINVAL;
2225
2226 switch (optname) {
2227 case NETLINK_PKTINFO:
2228 if (len < sizeof(int))
2229 return -EINVAL;
2230 len = sizeof(int);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002231 val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
Heiko Carstensa27b58f2006-10-30 15:06:12 -08002232 if (put_user(len, optlen) ||
2233 put_user(val, optval))
2234 return -EFAULT;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002235 err = 0;
2236 break;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002237 case NETLINK_BROADCAST_ERROR:
2238 if (len < sizeof(int))
2239 return -EINVAL;
2240 len = sizeof(int);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002241 val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002242 if (put_user(len, optlen) ||
2243 put_user(val, optval))
2244 return -EFAULT;
2245 err = 0;
2246 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002247 case NETLINK_NO_ENOBUFS:
2248 if (len < sizeof(int))
2249 return -EINVAL;
2250 len = sizeof(int);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002251 val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002252 if (put_user(len, optlen) ||
2253 put_user(val, optval))
2254 return -EFAULT;
2255 err = 0;
2256 break;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002257 default:
2258 err = -ENOPROTOOPT;
2259 }
2260 return err;
2261}
2262
2263static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2264{
2265 struct nl_pktinfo info;
2266
2267 info.group = NETLINK_CB(skb).dst_group;
2268 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2269}
2270
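/*
 * Example (illustrative userspace sketch): once NETLINK_PKTINFO is
 * enabled with setsockopt(), the destination group of each received
 * message arrives as a control message; msg.msg_control must point to
 * a CMSG_SPACE(sizeof(struct nl_pktinfo)) buffer:
 *
 *	struct cmsghdr *cm;
 *
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level == SOL_NETLINK &&
 *		    cm->cmsg_type == NETLINK_PKTINFO) {
 *			struct nl_pktinfo *pi = (void *)CMSG_DATA(cm);
 *			// pi->group is the destination multicast group
 *		}
 *	}
 */
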
Ying Xue1b784142015-03-02 15:37:48 +08002271static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 struct sock *sk = sock->sk;
2274 struct netlink_sock *nlk = nlk_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002275 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002276 u32 dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002277 u32 dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 struct sk_buff *skb;
2279 int err;
2280 struct scm_cookie scm;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002281 u32 netlink_skb_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282
2283 if (msg->msg_flags&MSG_OOB)
2284 return -EOPNOTSUPP;
2285
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002286 err = scm_send(sock, msg, &scm, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 if (err < 0)
2288 return err;
2289
2290 if (msg->msg_namelen) {
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002291 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 if (addr->nl_family != AF_NETLINK)
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002293 goto out;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002294 dst_portid = addr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002295 dst_group = ffs(addr->nl_groups);
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002296 err = -EPERM;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002297 if ((dst_group || dst_portid) &&
Eric W. Biederman5187cd02014-04-23 14:25:48 -07002298 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002299 goto out;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002300 netlink_skb_flags |= NETLINK_SKB_DST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002302 dst_portid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002303 dst_group = nlk->dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 }
2305
Eric W. Biederman15e47302012-09-07 20:12:54 +00002306 if (!nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 err = netlink_autobind(sock);
2308 if (err)
2309 goto out;
2310 }
2311
Al Viroa8866ff2014-12-12 23:02:36 -05002312 /* It's a really convoluted way for userland to ask for mmaped
2313 * sendmsg(), but that's what we've got...
2314 */
Patrick McHardy5fd96122013-04-17 06:47:03 +00002315 if (netlink_tx_is_mmaped(sk) &&
Al Viroa8866ff2014-12-12 23:02:36 -05002316 msg->msg_iter.type == ITER_IOVEC &&
2317 msg->msg_iter.nr_segs == 1 &&
Al Viroc0371da2014-11-24 10:42:55 -05002318 msg->msg_iter.iov->iov_base == NULL) {
Patrick McHardy5fd96122013-04-17 06:47:03 +00002319 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002320 &scm);
Patrick McHardy5fd96122013-04-17 06:47:03 +00002321 goto out;
2322 }
2323
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 err = -EMSGSIZE;
2325 if (len > sk->sk_sndbuf - 32)
2326 goto out;
2327 err = -ENOBUFS;
Pablo Neira3a365152013-06-28 03:04:23 +02002328 skb = netlink_alloc_large_skb(len, dst_group);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002329 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330 goto out;
2331
Eric W. Biederman15e47302012-09-07 20:12:54 +00002332 NETLINK_CB(skb).portid = nlk->portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002333 NETLINK_CB(skb).dst_group = dst_group;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002334 NETLINK_CB(skb).creds = scm.creds;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002335 NETLINK_CB(skb).flags = netlink_skb_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337 err = -EFAULT;
Al Viro6ce8e9c2014-04-06 21:25:44 -04002338 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 kfree_skb(skb);
2340 goto out;
2341 }
2342
2343 err = security_netlink_send(sk, skb);
2344 if (err) {
2345 kfree_skb(skb);
2346 goto out;
2347 }
2348
Patrick McHardyd629b832005-08-14 19:27:50 -07002349 if (dst_group) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002351 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 }
Eric W. Biederman15e47302012-09-07 20:12:54 +00002353 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354
2355out:
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002356 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 return err;
2358}
2359
Ying Xue1b784142015-03-02 15:37:48 +08002360static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 int flags)
2362{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363 struct scm_cookie scm;
2364 struct sock *sk = sock->sk;
2365 struct netlink_sock *nlk = nlk_sk(sk);
2366 int noblock = flags&MSG_DONTWAIT;
2367 size_t copied;
Johannes Berg68d6ac62010-08-15 21:20:44 +00002368 struct sk_buff *skb, *data_skb;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002369 int err, ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370
2371 if (flags&MSG_OOB)
2372 return -EOPNOTSUPP;
2373
2374 copied = 0;
2375
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002376 skb = skb_recv_datagram(sk, flags, noblock, &err);
2377 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 goto out;
2379
Johannes Berg68d6ac62010-08-15 21:20:44 +00002380 data_skb = skb;
2381
Johannes Berg1dacc762009-07-01 11:26:02 +00002382#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2383 if (unlikely(skb_shinfo(skb)->frag_list)) {
Johannes Berg1dacc762009-07-01 11:26:02 +00002384 /*
Johannes Berg68d6ac62010-08-15 21:20:44 +00002385	 * If this skb has a frag_list, it means that we have to use
2386	 * the frag_list skb's data for compat tasks and the regular
2387	 * skb's data for normal (non-compat) tasks.
Johannes Berg1dacc762009-07-01 11:26:02 +00002388 *
Johannes Berg68d6ac62010-08-15 21:20:44 +00002389 * If we need to send the compat skb, assign it to the
2390 * 'data_skb' variable so that it will be used below for data
2391 * copying. We keep 'skb' for everything else, including
2392 * freeing both later.
Johannes Berg1dacc762009-07-01 11:26:02 +00002393 */
Johannes Berg68d6ac62010-08-15 21:20:44 +00002394 if (flags & MSG_CMSG_COMPAT)
2395 data_skb = skb_shinfo(skb)->frag_list;
Johannes Berg1dacc762009-07-01 11:26:02 +00002396 }
2397#endif
2398
Eric Dumazet9063e212014-03-07 12:02:33 -08002399 /* Record the max length of recvmsg() calls for future allocations */
2400 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2401 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2402 16384);
2403
Johannes Berg68d6ac62010-08-15 21:20:44 +00002404 copied = data_skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 if (len < copied) {
2406 msg->msg_flags |= MSG_TRUNC;
2407 copied = len;
2408 }
2409
Johannes Berg68d6ac62010-08-15 21:20:44 +00002410 skb_reset_transport_header(data_skb);
David S. Miller51f3d022014-11-05 16:46:40 -05002411 err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412
2413 if (msg->msg_name) {
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002414 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 addr->nl_family = AF_NETLINK;
2416 addr->nl_pad = 0;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002417 addr->nl_pid = NETLINK_CB(skb).portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002418 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 msg->msg_namelen = sizeof(*addr);
2420 }
2421
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002422 if (nlk->flags & NETLINK_F_RECV_PKTINFO)
Patrick McHardycc9a06c2006-03-12 20:34:27 -08002423 netlink_cmsg_recv_pktinfo(msg, skb);
2424
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002425 memset(&scm, 0, sizeof(scm));
2426 scm.creds = *NETLINK_CREDS(skb);
Patrick McHardy188ccb52007-05-03 03:27:01 -07002427 if (flags & MSG_TRUNC)
Johannes Berg68d6ac62010-08-15 21:20:44 +00002428 copied = data_skb->len;
David S. Millerdaa37662010-08-15 23:21:50 -07002429
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430 skb_free_datagram(sk, skb);
2431
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002432 if (nlk->cb_running &&
2433 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
Andrey Vaginb44d2112011-02-21 02:40:47 +00002434 ret = netlink_dump(sk);
2435 if (ret) {
Ben Pfaffac30ef82014-07-09 10:31:22 -07002436 sk->sk_err = -ret;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002437 sk->sk_error_report(sk);
2438 }
2439 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002441 scm_recv(sock, msg, &scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442out:
2443 netlink_rcv_wake(sk);
2444 return err ? : copied;
2445}
2446
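/*
 * Example (illustrative userspace sketch): since a short read
 * truncates (the excess is discarded and MSG_TRUNC is set), callers
 * that do not know the message size can probe it first with
 * MSG_PEEK|MSG_TRUNC:
 *
 *	ssize_t len = recv(fd, NULL, 0, MSG_PEEK | MSG_TRUNC);
 *	if (len > 0) {
 *		void *buf = malloc(len);
 *		if (buf)
 *			len = recv(fd, buf, len, 0);
 *	}
 */
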
David S. Miller676d2362014-04-11 16:15:36 -04002447static void netlink_data_ready(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448{
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002449 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450}
2451
2452/*
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002453 * We export these functions to other modules. They provide a
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 * complete set of kernel non-blocking support for message
2455 * queueing.
2456 */
2457
2458struct sock *
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002459__netlink_kernel_create(struct net *net, int unit, struct module *module,
2460 struct netlink_kernel_cfg *cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461{
2462 struct socket *sock;
2463 struct sock *sk;
Patrick McHardy77247bb2005-08-14 19:27:13 -07002464 struct netlink_sock *nlk;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002465 struct listeners *listeners = NULL;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002466 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2467 unsigned int groups;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468
Akinobu Mitafab2caf2006-08-29 02:15:24 -07002469 BUG_ON(!nl_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002471 if (unit < 0 || unit >= MAX_LINKS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 return NULL;
2473
2474 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2475 return NULL;
2476
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002477 /*
2478	 * We only need a reference on the net from sk, but must not
2479	 * get_net() it. Besides, we cannot get and then put the net here.
2480	 * So we create the socket inside init_net and then move it to net.
2481 */
2482
2483 if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2484 goto out_sock_release_nosk;
2485
2486 sk = sock->sk;
Denis V. Lunevedf02082008-02-29 11:18:32 -08002487 sk_change_net(sk, net);
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002488
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002489 if (!cfg || cfg->groups < 32)
Patrick McHardy4277a082006-03-20 18:52:01 -08002490 groups = 32;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002491 else
2492 groups = cfg->groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08002493
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002494 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
Patrick McHardy4277a082006-03-20 18:52:01 -08002495 if (!listeners)
2496 goto out_sock_release;
2497
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 sk->sk_data_ready = netlink_data_ready;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002499 if (cfg && cfg->input)
2500 nlk_sk(sk)->netlink_rcv = cfg->input;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501
Herbert Xu8ea65f42015-01-26 14:02:56 +11002502 if (netlink_insert(sk, 0))
Patrick McHardy77247bb2005-08-14 19:27:13 -07002503 goto out_sock_release;
2504
2505 nlk = nlk_sk(sk);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002506 nlk->flags |= NETLINK_F_KERNEL_SOCKET;
Patrick McHardy77247bb2005-08-14 19:27:13 -07002507
2508 netlink_table_grab();
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002509 if (!nl_table[unit].registered) {
2510 nl_table[unit].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002511 rcu_assign_pointer(nl_table[unit].listeners, listeners);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002512 nl_table[unit].cb_mutex = cb_mutex;
2513 nl_table[unit].module = module;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002514 if (cfg) {
2515 nl_table[unit].bind = cfg->bind;
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09002516 nl_table[unit].unbind = cfg->unbind;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002517 nl_table[unit].flags = cfg->flags;
Gao fengda12c902013-06-06 14:49:11 +08002518 if (cfg->compare)
2519 nl_table[unit].compare = cfg->compare;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002520 }
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002521 nl_table[unit].registered = 1;
Jesper Juhlf937f1f462007-10-15 01:39:12 -07002522 } else {
2523 kfree(listeners);
Denis V. Lunev869e58f2008-01-18 23:53:31 -08002524 nl_table[unit].registered++;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002525 }
Patrick McHardy77247bb2005-08-14 19:27:13 -07002526 netlink_table_ungrab();
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002527 return sk;
2528
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002529out_sock_release:
Patrick McHardy4277a082006-03-20 18:52:01 -08002530 kfree(listeners);
Denis V. Lunev9dfbec12008-02-29 11:17:56 -08002531 netlink_kernel_release(sk);
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002532 return NULL;
2533
2534out_sock_release_nosk:
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002535 sock_release(sock);
Patrick McHardy77247bb2005-08-14 19:27:13 -07002536 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537}
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002538EXPORT_SYMBOL(__netlink_kernel_create);
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002539
2540void
2541netlink_kernel_release(struct sock *sk)
2542{
Denis V. Lunevedf02082008-02-29 11:18:32 -08002543 sk_release_kernel(sk);
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002544}
2545EXPORT_SYMBOL(netlink_kernel_release);
2546
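/*
 * Example (illustrative sketch): subsystems normally go through the
 * netlink_kernel_create() wrapper from <linux/netlink.h>, which
 * supplies THIS_MODULE to __netlink_kernel_create(). NETLINK_USERSOCK
 * and hypothetical_input() are example choices:
 *
 *	static void hypothetical_input(struct sk_buff *skb)
 *	{
 *		// called for every message sent to this kernel socket
 *	}
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups = 32,
 *		.input  = hypothetical_input,
 *	};
 *	struct sock *nls = netlink_kernel_create(&init_net,
 *						 NETLINK_USERSOCK, &cfg);
 *	...
 *	netlink_kernel_release(nls);
 */
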
Johannes Bergd136f1b2009-09-12 03:03:15 +00002547int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002548{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002549 struct listeners *new, *old;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002550 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002551
2552 if (groups < 32)
2553 groups = 32;
2554
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002555 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002556 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2557 if (!new)
Johannes Bergd136f1b2009-09-12 03:03:15 +00002558 return -ENOMEM;
Eric Dumazet6d772ac2012-10-18 03:21:55 +00002559 old = nl_deref_protected(tbl->listeners);
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002560 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2561 rcu_assign_pointer(tbl->listeners, new);
2562
Lai Jiangshan37b6b932011-03-15 18:01:42 +08002563 kfree_rcu(old, rcu);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002564 }
2565 tbl->groups = groups;
2566
Johannes Bergd136f1b2009-09-12 03:03:15 +00002567 return 0;
2568}
2569
2570/**
2571 * netlink_change_ngroups - change number of multicast groups
2572 *
2573 * This changes the number of multicast groups that are available
2574 * on a certain netlink family. Note that it is not possible to
2575 * change the number of groups to below 32. Also note that it does
2576 * not implicitly call netlink_clear_multicast_users() when the
2577 * number of groups is reduced.
2578 *
2579 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2580 * @groups: The new number of groups.
2581 */
2582int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2583{
2584 int err;
2585
2586 netlink_table_grab();
2587 err = __netlink_change_ngroups(sk, groups);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002588 netlink_table_ungrab();
Johannes Bergd136f1b2009-09-12 03:03:15 +00002589
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002590 return err;
2591}
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002592
Johannes Bergb8273572009-09-24 15:44:05 -07002593void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2594{
2595 struct sock *sk;
Johannes Bergb8273572009-09-24 15:44:05 -07002596 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2597
Sasha Levinb67bfe02013-02-27 17:06:00 -08002598 sk_for_each_bound(sk, &tbl->mc_list)
Johannes Bergb8273572009-09-24 15:44:05 -07002599 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2600}
2601
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002602struct nlmsghdr *
Eric W. Biederman15e47302012-09-07 20:12:54 +00002603__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002604{
2605 struct nlmsghdr *nlh;
Hong zhi guo573ce262013-03-27 06:47:04 +00002606 int size = nlmsg_msg_size(len);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002607
Wang Yufen23b45672014-02-17 16:53:32 +08002608 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002609 nlh->nlmsg_type = type;
2610 nlh->nlmsg_len = size;
2611 nlh->nlmsg_flags = flags;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002612 nlh->nlmsg_pid = portid;
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002613 nlh->nlmsg_seq = seq;
2614 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
Hong zhi guo573ce262013-03-27 06:47:04 +00002615 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002616 return nlh;
2617}
2618EXPORT_SYMBOL(__nlmsg_put);
2619
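/*
 * Example (illustrative sketch): most callers use the nlmsg_put()
 * wrapper from <net/netlink.h> rather than __nlmsg_put(), and must
 * check for NULL when the skb runs out of tailroom. EXAMPLE_MSG_TYPE
 * and struct example_payload are made up:
 *
 *	struct nlmsghdr *nlh;
 *
 *	nlh = nlmsg_put(skb, portid, seq, EXAMPLE_MSG_TYPE,
 *			sizeof(struct example_payload), 0);
 *	if (!nlh)
 *		return -EMSGSIZE;
 *	memcpy(nlmsg_data(nlh), &payload,
 *	       sizeof(struct example_payload));
 */
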
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620/*
2621 * It looks a bit ugly.
2622 * It would be better to create a kernel thread.
2623 */
2624
2625static int netlink_dump(struct sock *sk)
2626{
2627 struct netlink_sock *nlk = nlk_sk(sk);
2628 struct netlink_callback *cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002629 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 struct nlmsghdr *nlh;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002631 int len, err = -ENOBUFS;
Greg Rosec7ac8672011-06-10 01:27:09 +00002632 int alloc_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002634 mutex_lock(nlk->cb_mutex);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002635 if (!nlk->cb_running) {
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002636 err = -EINVAL;
2637 goto errout_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 }
2639
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002640 cb = &nlk->cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002641 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2642
Patrick McHardyf9c22882013-04-17 06:47:04 +00002643 if (!netlink_rx_is_mmaped(sk) &&
2644 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2645 goto errout_skb;
Eric Dumazet9063e212014-03-07 12:02:33 -08002646
2647 /* NLMSG_GOODSIZE is small to avoid high order allocations being
2648 * required, but it makes sense to _attempt_ a 16K bytes allocation
2649 * to reduce number of system calls on dump operations, if user
2650 * ever provided a big enough buffer.
2651 */
2652 if (alloc_size < nlk->max_recvmsg_len) {
2653 skb = netlink_alloc_skb(sk,
2654 nlk->max_recvmsg_len,
2655 nlk->portid,
2656 GFP_KERNEL |
2657 __GFP_NOWARN |
2658 __GFP_NORETRY);
2659 /* available room should be exact amount to avoid MSG_TRUNC */
2660 if (skb)
2661 skb_reserve(skb, skb_tailroom(skb) -
2662 nlk->max_recvmsg_len);
2663 }
2664 if (!skb)
2665 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2666 GFP_KERNEL);
Greg Rosec7ac8672011-06-10 01:27:09 +00002667 if (!skb)
Dan Carpenterc63d6ea2011-06-15 03:11:42 +00002668 goto errout_skb;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002669 netlink_skb_set_owner_r(skb, sk);
Greg Rosec7ac8672011-06-10 01:27:09 +00002670
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 len = cb->dump(skb, cb);
2672
2673 if (len > 0) {
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002674 mutex_unlock(nlk->cb_mutex);
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002675
2676 if (sk_filter(sk, skb))
2677 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002678 else
2679 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680 return 0;
2681 }
2682
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002683 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2684 if (!nlh)
2685 goto errout_skb;
2686
Johannes Berg670dc282011-06-20 13:40:46 +02002687 nl_dump_check_consistent(cb, nlh);
2688
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002689 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2690
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002691 if (sk_filter(sk, skb))
2692 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002693 else
2694 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695
Thomas Grafa8f74b22005-11-10 02:25:52 +01002696 if (cb->done)
2697 cb->done(cb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002699 nlk->cb_running = false;
2700 mutex_unlock(nlk->cb_mutex);
Gao feng6dc878a2012-10-04 20:15:48 +00002701 module_put(cb->module);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002702 consume_skb(cb->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703 return 0;
Thomas Graf17977542005-06-18 22:53:48 -07002704
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002705errout_skb:
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002706 mutex_unlock(nlk->cb_mutex);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002707 kfree_skb(skb);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002708 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709}
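
/*
 * Editor's sketch of the cb->dump() contract assumed above: the
 * callback fills @skb and returns a positive value while it has more
 * data (the skb is sent and netlink_dump() runs again on the next
 * recvmsg()), and returns 0 once the dump is complete, at which point
 * NLMSG_DONE is appended.  All my_* names are hypothetical:
 *
 *	static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
 *	{
 *		int idx;
 *
 *		for (idx = cb->args[0]; idx < my_table_size; idx++)
 *			if (my_fill_entry(skb, idx) < 0)
 *				break;
 *		cb->args[0] = idx;
 *		return idx < my_table_size ? skb->len : 0;
 *	}
 */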

int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;
	int ret;

	/* Memory mapped dump requests need to be copied to avoid looping
	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
	 * a reference to the skb.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		skb = skb_copy(skb, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
	} else
		atomic_inc(&skb->users);

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
	if (sk == NULL) {
		ret = -ECONNREFUSED;
		goto error_free;
	}

	nlk = nlk_sk(sk);
	mutex_lock(nlk->cb_mutex);
	/* A dump is already in progress... */
	if (nlk->cb_running) {
		ret = -EBUSY;
		goto error_unlock;
	}
	/* Take a reference on the module that cb->dump belongs to. */
	if (!try_module_get(control->module)) {
		ret = -EPROTONOSUPPORT;
		goto error_unlock;
	}

	cb = &nlk->cb;
	memset(cb, 0, sizeof(*cb));
	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;
	cb->skb = skb;

	nlk->cb_running = true;

	mutex_unlock(nlk->cb_mutex);

	ret = netlink_dump(sk);
	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump; by returning -EINTR we
	 * signal that no ACK should be sent even if one was requested.
	 */
	return -EINTR;

error_unlock:
	sock_put(sk);
	mutex_unlock(nlk->cb_mutex);
error_free:
	kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);
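
/*
 * Editor's sketch: subsystems normally reach this through the
 * netlink_dump_start() wrapper (which fills in control->module) from
 * their request handler when NLM_F_DUMP is set; the -EINTR return
 * above then tells netlink_rcv_skb() not to send an ACK.  my_dump(),
 * my_done() and my_sk are hypothetical:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = my_dump,
 *			.done = my_done,
 *		};
 *
 *		return netlink_dump_start(my_sk, skb, nlh, &c);
 *	}
 */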

void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* Error messages get the original request appended. */
	if (err)
		payload += nlmsg_len(nlh);

	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
				NETLINK_CB(in_skb).portid, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).portid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
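
/*
 * Editor's note on the wire format produced above: the reply is an
 * NLMSG_ERROR message whose payload is a struct nlmsgerr, an error
 * code followed by a copy of the request that triggered it (the full
 * request on failure, only its header on a plain ACK with error == 0).
 * A userspace receiver reads it roughly like this:
 *
 *	struct nlmsgerr *e = NLMSG_DATA(nlh);
 *
 *	if (nlh->nlmsg_type == NLMSG_ERROR && e->error)
 *		errno = -e->error;
 */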

int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
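
/*
 * Editor's sketch: a kernel socket's input function typically wraps
 * netlink_rcv_skb() with its own locking and a per-type dispatcher,
 * much like rtnetlink does.  my_mutex and my_rcv_msg() are
 * hypothetical:
 *
 *	static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		switch (nlh->nlmsg_type) {
 *		...
 *		}
 *		return 0;
 *	}
 *
 *	static void my_netlink_rcv(struct sk_buff *skb)
 *	{
 *		mutex_lock(&my_mutex);
 *		netlink_rcv_skb(skb, &my_rcv_msg);
 *		mutex_unlock(&my_mutex);
 *	}
 */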

/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_portid = portid;
		}

		/* Errors are reported via the destination sk->sk_err, but
		 * delivery errors are propagated if the
		 * NETLINK_BROADCAST_ERROR flag is set.
		 */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
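
/*
 * Editor's sketch: a typical caller multicasts an event and honours
 * NLM_F_ECHO from the originating request via nlmsg_report(), as
 * rtnetlink does.  MY_GRP_EVENT and req_skb are hypothetical:
 *
 *	err = nlmsg_notify(sk, skb, NETLINK_CB(req_skb).portid,
 *			   MY_GRP_EVENT, nlmsg_report(nlh), GFP_KERNEL);
 */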

#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	struct rhashtable_iter hti;
	int link;
};

static int netlink_walk_start(struct nl_seq_iter *iter)
{
	int err;

	err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
	if (err) {
		iter->link = MAX_LINKS;
		return err;
	}

	err = rhashtable_walk_start(&iter->hti);
	return err == -EAGAIN ? 0 : err;
}

static void netlink_walk_stop(struct nl_seq_iter *iter)
{
	rhashtable_walk_stop(&iter->hti);
	rhashtable_walk_exit(&iter->hti);
}

static void *__netlink_seq_next(struct seq_file *seq)
{
	struct nl_seq_iter *iter = seq->private;
	struct netlink_sock *nlk;

	do {
		for (;;) {
			int err;

			nlk = rhashtable_walk_next(&iter->hti);

			if (IS_ERR(nlk)) {
				if (PTR_ERR(nlk) == -EAGAIN)
					continue;

				return nlk;
			}

			if (nlk)
				break;

			netlink_walk_stop(iter);
			if (++iter->link >= MAX_LINKS)
				return NULL;

			err = netlink_walk_start(iter);
			if (err)
				return ERR_PTR(err);
		}
	} while (sock_net(&nlk->sk) != seq_file_net(seq));

	return nlk;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
{
	struct nl_seq_iter *iter = seq->private;
	void *obj = SEQ_START_TOKEN;
	loff_t pos;
	int err;

	iter->link = 0;

	err = netlink_walk_start(iter);
	if (err)
		return ERR_PTR(err);

	for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
		obj = __netlink_seq_next(seq);

	return obj;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return __netlink_seq_next(seq);
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	struct nl_seq_iter *iter = seq->private;

	if (iter->link >= MAX_LINKS)
		return;

	netlink_walk_stop(iter);
}

static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb_running,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s));
	}
	return 0;
}

static const struct seq_operations netlink_seq_ops = {
	.start = netlink_seq_start,
	.next = netlink_seq_next,
	.stop = netlink_seq_stop,
	.show = netlink_seq_show,
};

static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
			    sizeof(struct nl_seq_iter));
}

static const struct file_operations netlink_seq_fops = {
	.owner = THIS_MODULE,
	.open = netlink_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
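
/*
 * Editor's sketch of a notifier user: the chain fires
 * NETLINK_URELEASE when a bound socket is released, which subsystems
 * use to drop per-port state.  MY_PROTO and my_cleanup_port() are
 * hypothetical:
 *
 *	static int my_netlink_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct netlink_notify *n = ptr;
 *
 *		if (event == NETLINK_URELEASE && n->protocol == MY_PROTO)
 *			my_cleanup_port(n->portid);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netlink_event,
 *	};
 *
 *	netlink_register_notifier(&my_nb);
 */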

static const struct proto_ops netlink_ops = {
	.family = PF_NETLINK,
	.owner = THIS_MODULE,
	.release = netlink_release,
	.bind = netlink_bind,
	.connect = netlink_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = netlink_getname,
	.poll = netlink_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = netlink_setsockopt,
	.getsockopt = netlink_getsockopt,
	.sendmsg = netlink_sendmsg,
	.recvmsg = netlink_recvmsg,
	.mmap = netlink_mmap,
	.sendpage = sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner = THIS_MODULE,	/* for consistency 8) */
};

static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}

static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}

static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};

static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
{
	const struct netlink_sock *nlk = data;
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
	return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
}

static const struct rhashtable_params netlink_rhashtable_params = {
	.head_offset = offsetof(struct netlink_sock, node),
	.key_len = netlink_compare_arg_len,
	.obj_hashfn = netlink_hash,
	.obj_cmpfn = netlink_compare,
	.max_size = 65536,
	.automatic_shrinking = true,
};
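
/*
 * Editor's note: with these parameters a socket is keyed by the
 * (network namespace, portid) pair packed into a struct
 * netlink_compare_arg, so a table lookup is roughly:
 *
 *	struct netlink_compare_arg arg;
 *
 *	netlink_compare_arg_init(&arg, net, portid);
 *	nlk = rhashtable_lookup_fast(&table->hash, &arg,
 *				     netlink_rhashtable_params);
 */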

static int __init netlink_proto_init(void)
{
	int i;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	for (i = 0; i < MAX_LINKS; i++) {
		if (rhashtable_init(&nl_table[i].hash,
				    &netlink_rhashtable_params) < 0) {
			while (--i > 0)
				rhashtable_destroy(&nl_table[i].hash);
			kfree(nl_table);
			goto panic;
		}
	}

	INIT_LIST_HEAD(&netlink_tap_all);

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);