/*
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *                              Patrick McHardy <kaber@trash.net>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
        struct rcu_head         rcu;
        unsigned long           masks[0];
};

/* state bits */
#define NETLINK_S_CONGESTED             0x0

/* flags */
#define NETLINK_F_KERNEL_SOCKET         0x1
#define NETLINK_F_RECV_PKTINFO          0x2
#define NETLINK_F_BROADCAST_SEND_ERROR  0x4
#define NETLINK_F_RECV_NO_ENOBUFS       0x8
#define NETLINK_F_LISTEN_ALL_NSID       0x10

static inline int netlink_is_kernel(struct sock *sk)
{
        return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}

struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with a per-bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired - either during or after the socket has been removed from
 * the list - and after an RCU grace period.
 */
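/* Illustrative reader-side pattern implied by the scheme above (a sketch
 * that mirrors netlink_lookup() further down, not a new API):
 *
 *      rcu_read_lock();
 *      sk = __netlink_lookup(table, portid, net);
 *      if (sk)
 *              sock_hold(sk);  - pin the socket before leaving the
 *                                RCU read-side critical section
 *      rcu_read_unlock();
 */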
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

static inline u32 netlink_group_mask(u32 group)
{
        return group ? 1 << (group - 1) : 0;
}

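/* Worked example: multicast groups are numbered from 1, so group 1 maps
 * to bit 0 (mask 0x1) and group 5 to bit 4 (mask 0x10); group 0 means
 * "no group" and yields an empty mask.
 */
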
int netlink_add_tap(struct netlink_tap *nt)
{
        if (unlikely(nt->dev->type != ARPHRD_NETLINK))
                return -EINVAL;

        spin_lock(&netlink_tap_lock);
        list_add_rcu(&nt->list, &netlink_tap_all);
        spin_unlock(&netlink_tap_lock);

        __module_get(nt->module);

        return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
        bool found = false;
        struct netlink_tap *tmp;

        spin_lock(&netlink_tap_lock);

        list_for_each_entry(tmp, &netlink_tap_all, list) {
                if (nt == tmp) {
                        list_del_rcu(&nt->list);
                        found = true;
                        goto out;
                }
        }

        pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
        spin_unlock(&netlink_tap_lock);

        if (found && nt->module)
                module_put(nt->module);

        return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
        int ret;

        ret = __netlink_remove_tap(nt);
        synchronize_net();

        return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

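/* Sketch of a tap consumer (an assumption modelled after the nlmon driver;
 * "dev" stands for a caller-owned net_device of type ARPHRD_NETLINK):
 *
 *      static struct netlink_tap my_tap;
 *
 *      my_tap.dev = dev;
 *      my_tap.module = THIS_MODULE;
 *      err = netlink_add_tap(&my_tap);    - every netlink skb is now cloned
 *                                           to "dev" by the functions below
 *      ...
 *      netlink_remove_tap(&my_tap);       - safe against concurrent readers
 *                                           via synchronize_net()
 *
 * netlink_add_tap() pins the owning module, so a tap module stays loaded
 * while its taps are registered.
 */
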
static bool netlink_filter_tap(const struct sk_buff *skb)
{
        struct sock *sk = skb->sk;

        /* We take the more conservative approach and
         * whitelist socket protocols that may pass.
         */
        switch (sk->sk_protocol) {
        case NETLINK_ROUTE:
        case NETLINK_USERSOCK:
        case NETLINK_SOCK_DIAG:
        case NETLINK_NFLOG:
        case NETLINK_XFRM:
        case NETLINK_FIB_LOOKUP:
        case NETLINK_NETFILTER:
        case NETLINK_GENERIC:
                return true;
        }

        return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
                                     struct net_device *dev)
{
        struct sk_buff *nskb;
        struct sock *sk = skb->sk;
        int ret = -ENOMEM;

        dev_hold(dev);
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (nskb) {
                nskb->dev = dev;
                nskb->protocol = htons((u16) sk->sk_protocol);
                nskb->pkt_type = netlink_is_kernel(sk) ?
                                 PACKET_KERNEL : PACKET_USER;
                skb_reset_network_header(nskb);
                ret = dev_queue_xmit(nskb);
                if (unlikely(ret > 0))
                        ret = net_xmit_errno(ret);
        }

        dev_put(dev);
        return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
        int ret;
        struct netlink_tap *tmp;

        if (!netlink_filter_tap(skb))
                return;

        list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
                ret = __netlink_deliver_tap_skb(skb, tmp->dev);
                if (unlikely(ret))
                        break;
        }
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
        rcu_read_lock();

        if (unlikely(!list_empty(&netlink_tap_all)))
                __netlink_deliver_tap(skb);

        rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
                                       struct sk_buff *skb)
{
        if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
                netlink_deliver_tap(skb);
}

static void netlink_overrun(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
                if (!test_and_set_bit(NETLINK_S_CONGESTED,
                                      &nlk_sk(sk)->state)) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
                }
        }
        atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (skb_queue_empty(&sk->sk_receive_queue))
                clear_bit(NETLINK_S_CONGESTED, &nlk->state);
        if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
                wake_up_interruptible(&nlk->wait);
}

#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
        return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
        return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
        return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        else
                return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
        unsigned int i;

        for (i = 0; i < len; i++) {
                if (pg_vec[i] != NULL) {
                        if (is_vmalloc_addr(pg_vec[i]))
                                vfree(pg_vec[i]);
                        else
                                free_pages((unsigned long)pg_vec[i], order);
                }
        }
        kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
        void *buffer;
        gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
                          __GFP_NOWARN | __GFP_NORETRY;

        buffer = (void *)__get_free_pages(gfp_flags, order);
        if (buffer != NULL)
                return buffer;

        buffer = vzalloc((1 << order) * PAGE_SIZE);
        if (buffer != NULL)
                return buffer;

        gfp_flags &= ~__GFP_NORETRY;
        return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
                           struct nl_mmap_req *req, unsigned int order)
{
        unsigned int block_nr = req->nm_block_nr;
        unsigned int i;
        void **pg_vec;

        pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
        if (pg_vec == NULL)
                return NULL;

        for (i = 0; i < block_nr; i++) {
                pg_vec[i] = alloc_one_pg_vec_page(order);
                if (pg_vec[i] == NULL)
                        goto err1;
        }

        return pg_vec;
err1:
        free_pg_vec(pg_vec, order, block_nr);
        return NULL;
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
                            bool closing, bool tx_ring)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        struct sk_buff_head *queue;
        void **pg_vec = NULL;
        unsigned int order = 0;
        int err;

        ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
        queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

        if (!closing) {
                if (atomic_read(&nlk->mapped))
                        return -EBUSY;
                if (atomic_read(&ring->pending))
                        return -EBUSY;
        }

        if (req->nm_block_nr) {
                if (ring->pg_vec != NULL)
                        return -EBUSY;

                if ((int)req->nm_block_size <= 0)
                        return -EINVAL;
                if (!PAGE_ALIGNED(req->nm_block_size))
                        return -EINVAL;
                if (req->nm_frame_size < NL_MMAP_HDRLEN)
                        return -EINVAL;
                if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
                        return -EINVAL;

                ring->frames_per_block = req->nm_block_size /
                                         req->nm_frame_size;
                if (ring->frames_per_block == 0)
                        return -EINVAL;
                if (ring->frames_per_block * req->nm_block_nr !=
                    req->nm_frame_nr)
                        return -EINVAL;

                order = get_order(req->nm_block_size);
                pg_vec = alloc_pg_vec(nlk, req, order);
                if (pg_vec == NULL)
                        return -ENOMEM;
        } else {
                if (req->nm_frame_nr)
                        return -EINVAL;
        }

        err = -EBUSY;
        mutex_lock(&nlk->pg_vec_lock);
        if (closing || atomic_read(&nlk->mapped) == 0) {
                err = 0;
                spin_lock_bh(&queue->lock);

                ring->frame_max    = req->nm_frame_nr - 1;
                ring->head         = 0;
                ring->frame_size   = req->nm_frame_size;
                ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;

                swap(ring->pg_vec_len, req->nm_block_nr);
                swap(ring->pg_vec_order, order);
                swap(ring->pg_vec, pg_vec);

                __skb_queue_purge(queue);
                spin_unlock_bh(&queue->lock);

                WARN_ON(atomic_read(&nlk->mapped));
        }
        mutex_unlock(&nlk->pg_vec_lock);

        if (pg_vec)
                free_pg_vec(pg_vec, order, req->nm_block_nr);
        return err;
}

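/* Example request that satisfies every check in netlink_set_ring() above
 * (informative only; the numbers assume PAGE_SIZE == 4096): four 16 KiB
 * blocks carved into 1 KiB frames, 64 frames in total:
 *
 *      struct nl_mmap_req req = {
 *              .nm_block_size = 16384,   - page aligned and positive
 *              .nm_block_nr   = 4,
 *              .nm_frame_size = 1024,    - >= NL_MMAP_HDRLEN, 4-byte aligned
 *              .nm_frame_nr   = 64,      - frames_per_block (16) * block_nr (4)
 *      };
 */
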
static void netlink_mm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct socket *sock = file->private_data;
        struct sock *sk = sock->sk;

        if (sk)
                atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct socket *sock = file->private_data;
        struct sock *sk = sock->sk;

        if (sk)
                atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
        .open = netlink_mm_open,
        .close = netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
                        struct vm_area_struct *vma)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        unsigned long start, size, expected;
        unsigned int i;
        int err = -EINVAL;

        if (vma->vm_pgoff)
                return -EINVAL;

        mutex_lock(&nlk->pg_vec_lock);

        expected = 0;
        for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
                if (ring->pg_vec == NULL)
                        continue;
                expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
        }

        if (expected == 0)
                goto out;

        size = vma->vm_end - vma->vm_start;
        if (size != expected)
                goto out;

        start = vma->vm_start;
        for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
                if (ring->pg_vec == NULL)
                        continue;

                for (i = 0; i < ring->pg_vec_len; i++) {
                        struct page *page;
                        void *kaddr = ring->pg_vec[i];
                        unsigned int pg_num;

                        for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
                                page = pgvec_to_page(kaddr);
                                err = vm_insert_page(vma, start, page);
                                if (err < 0)
                                        goto out;
                                start += PAGE_SIZE;
                                kaddr += PAGE_SIZE;
                        }
                }
        }

        atomic_inc(&nlk->mapped);
        vma->vm_ops = &netlink_mmap_ops;
        err = 0;
out:
        mutex_unlock(&nlk->pg_vec_lock);
        return err;
}

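/* Userspace counterpart, as a sketch (assumes both rings were configured
 * with the same request; the kernel maps the RX ring first, then TX):
 *
 *      setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
 *      setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req));
 *      len  = 2 * req.nm_block_nr * req.nm_block_size;
 *      ring = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The mapping must start at offset 0 and cover both rings exactly,
 * otherwise netlink_mmap() above fails with -EINVAL.
 */
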
static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        struct page *p_start, *p_end;

        /* First page is flushed through netlink_{get,set}_status */
        p_start = pgvec_to_page(hdr + PAGE_SIZE);
        p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
        while (p_start <= p_end) {
                flush_dcache_page(p_start);
                p_start++;
        }
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
        smp_rmb();
        flush_dcache_page(pgvec_to_page(hdr));
        return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
                               enum nl_mmap_status status)
{
        smp_mb();
        hdr->nm_status = status;
        flush_dcache_page(pgvec_to_page(hdr));
}

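/* Informative note: nm_status is the handshake word between kernel and
 * user space. The smp_mb() in netlink_set_status() orders the frame
 * payload before the status store, and the smp_rmb() in
 * netlink_get_status() orders the status load before the payload reads,
 * so a peer only consumes a frame after its status marks it ready.
 */
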
static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
        unsigned int pg_vec_pos, frame_off;

        pg_vec_pos = pos / ring->frames_per_block;
        frame_off  = pos % ring->frames_per_block;

        return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

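/* Worked example, using the illustrative geometry from the nl_mmap_req
 * sketch above (16 frames of 1024 bytes per block): frame position 37
 * resolves to pg_vec[37 / 16] = pg_vec[2], at byte offset
 * (37 % 16) * 1024 = 5120 within that block.
 */
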
static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
                     enum nl_mmap_status status)
{
        struct nl_mmap_hdr *hdr;

        hdr = __netlink_lookup_frame(ring, pos);
        if (netlink_get_status(hdr) != status)
                return NULL;

        return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
                      enum nl_mmap_status status)
{
        return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
                       enum nl_mmap_status status)
{
        unsigned int prev;

        prev = ring->head ? ring->head - 1 : ring->frame_max;
        return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
        ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
        unsigned int head = ring->head, pos = head;
        const struct nl_mmap_hdr *hdr;

        do {
                hdr = __netlink_lookup_frame(ring, pos);
                if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
                        break;
                if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
                        break;
                netlink_increment_head(ring);
        } while (ring->head != head);
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
        struct netlink_ring *ring = &nlk->rx_ring;
        struct nl_mmap_hdr *hdr;
        unsigned int n;

        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL)
                return false;

        n = ring->head + ring->frame_max / 2;
        if (n > ring->frame_max)
                n -= ring->frame_max;

        hdr = __netlink_lookup_frame(ring, n);

        return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}

static unsigned int netlink_poll(struct file *file, struct socket *sock,
                                 poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        unsigned int mask;
        int err;

        if (nlk->rx_ring.pg_vec != NULL) {
                /* Memory mapped sockets don't call recvmsg(), so flow control
                 * for dumps is performed here. A dump is allowed to continue
                 * if at least half the ring is unused.
                 */
                while (nlk->cb_running && netlink_dump_space(nlk)) {
                        err = netlink_dump(sk);
                        if (err < 0) {
                                sk->sk_err = -err;
                                sk->sk_error_report(sk);
                                break;
                        }
                }
                netlink_rcv_wake(sk);
        }

        mask = datagram_poll(file, sock, wait);

        spin_lock_bh(&sk->sk_receive_queue.lock);
        if (nlk->rx_ring.pg_vec) {
                netlink_forward_ring(&nlk->rx_ring);
                if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
                        mask |= POLLIN | POLLRDNORM;
        }
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        spin_lock_bh(&sk->sk_write_queue.lock);
        if (nlk->tx_ring.pg_vec) {
                if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
                        mask |= POLLOUT | POLLWRNORM;
        }
        spin_unlock_bh(&sk->sk_write_queue.lock);

        return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
        return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
                                   struct netlink_ring *ring,
                                   struct nl_mmap_hdr *hdr)
{
        unsigned int size;
        void *data;

        size = ring->frame_size - NL_MMAP_HDRLEN;
        data = (void *)hdr + NL_MMAP_HDRLEN;

        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end  = skb->tail + size;
        skb->len  = 0;

        skb->destructor = netlink_skb_destructor;
        NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
        NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
                                u32 dst_portid, u32 dst_group,
                                struct scm_cookie *scm)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        struct nl_mmap_hdr *hdr;
        struct sk_buff *skb;
        unsigned int maxlen;
        int err = 0, len = 0;

        mutex_lock(&nlk->pg_vec_lock);

        ring   = &nlk->tx_ring;
        maxlen = ring->frame_size - NL_MMAP_HDRLEN;

        do {
                unsigned int nm_len;

                hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
                if (hdr == NULL) {
                        if (!(msg->msg_flags & MSG_DONTWAIT) &&
                            atomic_read(&nlk->tx_ring.pending))
                                schedule();
                        continue;
                }

                nm_len = ACCESS_ONCE(hdr->nm_len);
                if (nm_len > maxlen) {
                        err = -EINVAL;
                        goto out;
                }

                netlink_frame_flush_dcache(hdr, nm_len);

                skb = alloc_skb(nm_len, GFP_KERNEL);
                if (skb == NULL) {
                        err = -ENOBUFS;
                        goto out;
                }
                __skb_put(skb, nm_len);
                memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
                netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

                netlink_increment_head(ring);

                NETLINK_CB(skb).portid    = nlk->portid;
                NETLINK_CB(skb).dst_group = dst_group;
                NETLINK_CB(skb).creds     = scm->creds;

                err = security_netlink_send(sk, skb);
                if (err) {
                        kfree_skb(skb);
                        goto out;
                }

                if (unlikely(dst_group)) {
                        atomic_inc(&skb->users);
                        netlink_broadcast(sk, skb, dst_portid, dst_group,
                                          GFP_KERNEL);
                }
                err = netlink_unicast(sk, skb, dst_portid,
                                      msg->msg_flags & MSG_DONTWAIT);
                if (err < 0)
                        goto out;
                len += err;

        } while (hdr != NULL ||
                 (!(msg->msg_flags & MSG_DONTWAIT) &&
                  atomic_read(&nlk->tx_ring.pending)));

        if (len > 0)
                err = len;
out:
        mutex_unlock(&nlk->pg_vec_lock);
        return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
        struct nl_mmap_hdr *hdr;

        hdr = netlink_mmap_hdr(skb);
        hdr->nm_len   = skb->len;
        hdr->nm_group = NETLINK_CB(skb).dst_group;
        hdr->nm_pid   = NETLINK_CB(skb).creds.pid;
        hdr->nm_uid   = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
        hdr->nm_gid   = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
        netlink_frame_flush_dcache(hdr, hdr->nm_len);
        netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

        NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
        kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring = &nlk->rx_ring;
        struct nl_mmap_hdr *hdr;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL) {
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                kfree_skb(skb);
                netlink_overrun(sk);
                return;
        }
        netlink_increment_head(ring);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        hdr->nm_len   = skb->len;
        hdr->nm_group = NETLINK_CB(skb).dst_group;
        hdr->nm_pid   = NETLINK_CB(skb).creds.pid;
        hdr->nm_uid   = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
        hdr->nm_gid   = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
        netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)      false
#define netlink_rx_is_mmaped(sk)        false
#define netlink_tx_is_mmaped(sk)        false
#define netlink_mmap                    sock_no_mmap
#define netlink_poll                    datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)      0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
        struct nl_mmap_hdr *hdr;
        struct netlink_ring *ring;
        struct sock *sk;

        /* If a packet from the kernel to userspace was freed because of an
         * error without being delivered to userspace, the kernel must reset
         * the status. In the direction userspace to kernel, the status is
         * always reset here after the packet was processed and freed.
         */
        if (netlink_skb_is_mmaped(skb)) {
                hdr = netlink_mmap_hdr(skb);
                sk = NETLINK_CB(skb).sk;

                if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
                        netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
                        ring = &nlk_sk(sk)->tx_ring;
                } else {
                        if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
                                hdr->nm_len = 0;
                                netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
                        }
                        ring = &nlk_sk(sk)->rx_ring;
                }

                WARN_ON(atomic_read(&ring->pending) == 0);
                atomic_dec(&ring->pending);
                sock_put(sk);

                skb->head = NULL;
        }
#endif
        if (is_vmalloc_addr(skb->head)) {
                if (!skb->cloned ||
                    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
                        vfree(skb->head);

                skb->head = NULL;
        }
        if (skb->sk != NULL)
                sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
        WARN_ON(skb->sk != NULL);
        skb->sk = sk;
        skb->destructor = netlink_skb_destructor;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
        sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->cb_running) {
                if (nlk->cb.done)
                        nlk->cb.done(&nlk->cb);

                module_put(nlk->cb.module);
                kfree_skb(nlk->cb.skb);
        }

        skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
        if (1) {
                struct nl_mmap_req req;

                memset(&req, 0, sizeof(req));
                if (nlk->rx_ring.pg_vec)
                        netlink_set_ring(sk, &req, true, false);
                memset(&req, 0, sizeof(req));
                if (nlk->tx_ring.pg_vec)
                        netlink_set_ring(sk, &req, true, true);
        }
#endif /* CONFIG_NETLINK_MMAP */

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
                return;
        }

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
        __acquires(nl_table_lock)
{
        might_sleep();

        write_lock_irq(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                                break;
                        write_unlock_irq(&nl_table_lock);
                        schedule();
                        write_lock_irq(&nl_table_lock);
                }

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);
        }
}

void netlink_table_ungrab(void)
        __releases(nl_table_lock)
{
        write_unlock_irq(&nl_table_lock);
        wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
        /* read_lock() synchronizes us to netlink_table_grab */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
}

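/* Informative pairing sketch: short read-side sections pin the table while
 * netlink_table_grab() waits for them to drain:
 *
 *      netlink_lock_table();     - read_lock + nl_table_users++
 *      ... use nl_table[] ...
 *      netlink_unlock_table();   - last user wakes a sleeping grabber
 */
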
struct netlink_compare_arg
{
        possible_net_t pnet;
        u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
        (offsetof(struct netlink_compare_arg, portid) + sizeof(u32))

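/* Worked example (assuming a 64-bit build with CONFIG_NET_NS): pnet is a
 * pointer, so offsetof(struct netlink_compare_arg, portid) is 8 and
 * netlink_compare_arg_len is 12, while sizeof() would report 16 because of
 * trailing padding. Keying on 12 bytes keeps the padding out of the hash
 * input.
 */
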
static inline int netlink_compare(struct rhashtable_compare_arg *arg,
                                  const void *ptr)
{
        const struct netlink_compare_arg *x = arg->key;
        const struct netlink_sock *nlk = ptr;

        return nlk->portid != x->portid ||
               !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
                                     struct net *net, u32 portid)
{
        memset(arg, 0, sizeof(*arg));
        write_pnet(&arg->pnet, net);
        arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
                                     struct net *net)
{
        struct netlink_compare_arg arg;

        netlink_compare_arg_init(&arg, net, portid);
        return rhashtable_lookup_fast(&table->hash, &arg,
                                      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
        struct netlink_compare_arg arg;

        netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
        return rhashtable_lookup_insert_key(&table->hash, &arg,
                                            &nlk_sk(sk)->node,
                                            netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
        struct netlink_table *table = &nl_table[protocol];
        struct sock *sk;

        rcu_read_lock();
        sk = __netlink_lookup(table, portid, net);
        if (sk)
                sock_hold(sk);
        rcu_read_unlock();

        return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
        struct netlink_table *tbl = &nl_table[sk->sk_protocol];
        unsigned long mask;
        unsigned int i;
        struct listeners *listeners;

        listeners = nl_deref_protected(tbl->listeners);
        if (!listeners)
                return;

        for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
                mask = 0;
                sk_for_each_bound(sk, &tbl->mc_list) {
                        if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
                                mask |= nlk_sk(sk)->groups[i];
                }
                listeners->masks[i] = mask;
        }
        /* this function is only called with the netlink table "grabbed", which
         * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, u32 portid)
{
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        int err;

        lock_sock(sk);

        err = -EBUSY;
        if (nlk_sk(sk)->portid)
                goto err;

        err = -ENOMEM;
        if (BITS_PER_LONG > 32 &&
            unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
                goto err;

        nlk_sk(sk)->portid = portid;
        sock_hold(sk);

        err = __netlink_insert(table, sk);
        if (err) {
                if (err == -EEXIST)
                        err = -EADDRINUSE;
                sock_put(sk);
        }

err:
        release_sock(sk);
        return err;
}

static void netlink_remove(struct sock *sk)
{
        struct netlink_table *table;

        table = &nl_table[sk->sk_protocol];
        if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
                                    netlink_rhashtable_params)) {
                WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }

        netlink_table_grab();
        if (nlk_sk(sk)->subscriptions) {
                __sk_del_bind_node(sk);
                netlink_update_listeners(sk);
        }
        if (sk->sk_protocol == NETLINK_GENERIC)
                atomic_inc(&genl_sk_destructing_cnt);
        netlink_table_ungrab();
}

static struct proto netlink_proto = {
        .name     = "NETLINK",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
                            struct mutex *cb_mutex, int protocol,
                            int kern)
{
        struct sock *sk;
        struct netlink_sock *nlk;

        sock->ops = &netlink_ops;

        sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        nlk = nlk_sk(sk);
        if (cb_mutex) {
                nlk->cb_mutex = cb_mutex;
        } else {
                nlk->cb_mutex = &nlk->cb_def_mutex;
                mutex_init(nlk->cb_mutex);
        }
        init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
        mutex_init(&nlk->pg_vec_lock);
#endif

        sk->sk_destruct = netlink_sock_destruct;
        sk->sk_protocol = protocol;
        return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
                          int kern)
{
        struct module *module = NULL;
        struct mutex *cb_mutex;
        struct netlink_sock *nlk;
        int (*bind)(struct net *net, int group);
        void (*unbind)(struct net *net, int group);
        int err = 0;

        sock->state = SS_UNCONNECTED;

        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;

        netlink_lock_table();
#ifdef CONFIG_MODULES
        if (!nl_table[protocol].registered) {
                netlink_unlock_table();
                request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
                netlink_lock_table();
        }
#endif
        if (nl_table[protocol].registered &&
            try_module_get(nl_table[protocol].module))
                module = nl_table[protocol].module;
        else
                err = -EPROTONOSUPPORT;
        cb_mutex = nl_table[protocol].cb_mutex;
        bind = nl_table[protocol].bind;
        unbind = nl_table[protocol].unbind;
        netlink_unlock_table();

        if (err < 0)
                goto out;

        err = __netlink_create(net, sock, cb_mutex, protocol, kern);
        if (err < 0)
                goto out_module;

        local_bh_disable();
        sock_prot_inuse_add(net, &netlink_proto, 1);
        local_bh_enable();

        nlk = nlk_sk(sock->sk);
        nlk->module = module;
        nlk->netlink_bind = bind;
        nlk->netlink_unbind = unbind;
out:
        return err;

out_module:
        module_put(module);
        goto out;
}

static void deferred_put_nlk_sk(struct rcu_head *head)
{
        struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

        sock_put(&nlk->sk);
}

static int netlink_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk;

        if (!sk)
                return 0;

        netlink_remove(sk);
        sock_orphan(sk);
        nlk = nlk_sk(sk);

        /*
         * OK. Socket is unlinked, any packets that arrive now
         * will be purged.
         */

        /* must not acquire netlink_table_lock in any way again before unbind
         * and notifying genetlink is done as otherwise it might deadlock
         */
        if (nlk->netlink_unbind) {
                int i;

                for (i = 0; i < nlk->ngroups; i++)
                        if (test_bit(i, nlk->groups))
                                nlk->netlink_unbind(sock_net(sk), i + 1);
        }
        if (sk->sk_protocol == NETLINK_GENERIC &&
            atomic_dec_return(&genl_sk_destructing_cnt) == 0)
                wake_up(&genl_sk_destructing_waitq);

        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);

        skb_queue_purge(&sk->sk_write_queue);

        if (nlk->portid) {
                struct netlink_notify n = {
                        .net = sock_net(sk),
                        .protocol = sk->sk_protocol,
                        .portid = nlk->portid,
                };
                atomic_notifier_call_chain(&netlink_chain,
                                           NETLINK_URELEASE, &n);
        }

        module_put(nlk->module);

        if (netlink_is_kernel(sk)) {
                netlink_table_grab();
                BUG_ON(nl_table[sk->sk_protocol].registered == 0);
                if (--nl_table[sk->sk_protocol].registered == 0) {
                        struct listeners *old;

                        old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
                        RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
                        kfree_rcu(old, rcu);
                        nl_table[sk->sk_protocol].module = NULL;
                        nl_table[sk->sk_protocol].bind = NULL;
                        nl_table[sk->sk_protocol].unbind = NULL;
                        nl_table[sk->sk_protocol].flags = 0;
                        nl_table[sk->sk_protocol].registered = 0;
                }
                netlink_table_ungrab();
        }

        kfree(nlk->groups);
        nlk->groups = NULL;

        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
        local_bh_enable();
        call_rcu(&nlk->rcu, deferred_put_nlk_sk);
        return 0;
}

static int netlink_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        s32 portid = task_tgid_vnr(current);
        int err;
        static s32 rover = -4097;

retry:
        cond_resched();
        rcu_read_lock();
        if (__netlink_lookup(table, portid, net)) {
                /* Bind collision, search negative portid values. */
                portid = rover--;
                if (rover > -4097)
                        rover = -4097;
                rcu_read_unlock();
                goto retry;
        }
        rcu_read_unlock();

        err = netlink_insert(sk, portid);
        if (err == -EADDRINUSE)
                goto retry;

        /* If 2 threads race to autobind, that is fine. */
        if (err == -EBUSY)
                err = 0;

        return err;
}

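/* Informative example: a process with tgid 1234 first tries portid 1234;
 * if that is taken, successive attempts walk the negative space starting
 * at -4097 (-4097, -4098, ...) until netlink_insert() succeeds.
 */
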
/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
                          struct user_namespace *user_ns, int cap)
{
        return ((nsp->flags & NETLINK_SKB_DST) ||
                file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
               ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
                        struct user_namespace *user_ns, int cap)
{
        return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in all user
 * namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
        return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap over the
 * network namespace of the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
        return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);

Eric W. Biederman5187cd02014-04-23 14:25:48 -07001395static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001396{
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00001397 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
Eric W. Biedermandf008c92012-11-16 03:03:07 +00001398 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001399}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001401static void
1402netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1403{
1404 struct netlink_sock *nlk = nlk_sk(sk);
1405
1406 if (nlk->subscriptions && !subscriptions)
1407 __sk_del_bind_node(sk);
1408 else if (!nlk->subscriptions && subscriptions)
1409 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1410 nlk->subscriptions = subscriptions;
1411}
1412
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001413static int netlink_realloc_groups(struct sock *sk)
Patrick McHardy513c2502005-09-06 15:43:59 -07001414{
1415 struct netlink_sock *nlk = nlk_sk(sk);
1416 unsigned int groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001417 unsigned long *new_groups;
Patrick McHardy513c2502005-09-06 15:43:59 -07001418 int err = 0;
1419
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001420 netlink_table_grab();
1421
Patrick McHardy513c2502005-09-06 15:43:59 -07001422 groups = nl_table[sk->sk_protocol].groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001423 if (!nl_table[sk->sk_protocol].registered) {
Patrick McHardy513c2502005-09-06 15:43:59 -07001424 err = -ENOENT;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001425 goto out_unlock;
1426 }
Patrick McHardy513c2502005-09-06 15:43:59 -07001427
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001428 if (nlk->ngroups >= groups)
1429 goto out_unlock;
Patrick McHardy513c2502005-09-06 15:43:59 -07001430
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001431 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1432 if (new_groups == NULL) {
1433 err = -ENOMEM;
1434 goto out_unlock;
1435 }
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001436 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001437 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1438
1439 nlk->groups = new_groups;
Patrick McHardy513c2502005-09-06 15:43:59 -07001440 nlk->ngroups = groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001441 out_unlock:
1442 netlink_table_ungrab();
1443 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001444}
1445
Johannes Berg02c81ab2014-12-22 18:56:35 +01001446static void netlink_undo_bind(int group, unsigned long groups,
Johannes Berg023e2cf2014-12-23 21:00:06 +01001447 struct sock *sk)
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001448{
Johannes Berg023e2cf2014-12-23 21:00:06 +01001449 struct netlink_sock *nlk = nlk_sk(sk);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001450 int undo;
1451
1452 if (!nlk->netlink_unbind)
1453 return;
1454
1455 for (undo = 0; undo < group; undo++)
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09001456 if (test_bit(undo, &groups))
Pablo Neira8b7c36d2015-01-29 10:51:53 +01001457 nlk->netlink_unbind(sock_net(sk), undo + 1);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001458}
1459
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001460static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1461 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462{
1463 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001464 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 struct netlink_sock *nlk = nlk_sk(sk);
1466 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1467 int err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001468	unsigned long groups = nladdr->nl_groups;
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001469
Hannes Frederic Sowa4e4b5372012-12-15 15:42:19 +00001470 if (addr_len < sizeof(struct sockaddr_nl))
1471 return -EINVAL;
1472
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 if (nladdr->nl_family != AF_NETLINK)
1474 return -EINVAL;
1475
1476 /* Only superuser is allowed to listen multicasts */
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001477 if (groups) {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001478 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy513c2502005-09-06 15:43:59 -07001479 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001480 err = netlink_realloc_groups(sk);
1481 if (err)
1482 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001483 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001485 if (nlk->portid)
Eric W. Biederman15e47302012-09-07 20:12:54 +00001486 if (nladdr->nl_pid != nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 return -EINVAL;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001488
1489 if (nlk->netlink_bind && groups) {
1490 int group;
1491
1492 for (group = 0; group < nlk->ngroups; group++) {
1493 if (!test_bit(group, &groups))
1494 continue;
Pablo Neira8b7c36d2015-01-29 10:51:53 +01001495 err = nlk->netlink_bind(net, group + 1);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001496 if (!err)
1497 continue;
Johannes Berg023e2cf2014-12-23 21:00:06 +01001498 netlink_undo_bind(group, groups, sk);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001499 return err;
1500 }
1501 }
1502
1503 if (!nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 err = nladdr->nl_pid ?
Herbert Xu8ea65f42015-01-26 14:02:56 +11001505 netlink_insert(sk, nladdr->nl_pid) :
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 netlink_autobind(sock);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001507 if (err) {
Johannes Berg023e2cf2014-12-23 21:00:06 +01001508 netlink_undo_bind(nlk->ngroups, groups, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 return err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001510 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 }
1512
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001513 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 return 0;
1515
1516 netlink_table_grab();
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001517 netlink_update_subscriptions(sk, nlk->subscriptions +
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001518 hweight32(groups) -
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001519 hweight32(nlk->groups[0]));
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001520 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08001521 netlink_update_listeners(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 netlink_table_ungrab();
1523
1524 return 0;
1525}
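/*
 * For reference, this is reached from userspace through plain bind(2)
 * on an AF_NETLINK socket (illustrative sketch, error handling
 * omitted). nl_pid of 0 asks the kernel to autobind a portid, and
 * nl_groups can only express groups 1..32:
 *
 *	struct sockaddr_nl snl = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = RTMGRP_LINK,
 *	};
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 *
 * Groups beyond 32 must be joined via the NETLINK_ADD_MEMBERSHIP
 * socket option handled in netlink_setsockopt() below.
 */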
1526
1527static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1528 int alen, int flags)
1529{
1530 int err = 0;
1531 struct sock *sk = sock->sk;
1532 struct netlink_sock *nlk = nlk_sk(sk);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001533 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534
Changli Gao6503d962010-03-31 22:58:26 +00001535 if (alen < sizeof(addr->sa_family))
1536 return -EINVAL;
1537
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 if (addr->sa_family == AF_UNSPEC) {
1539 sk->sk_state = NETLINK_UNCONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001540 nlk->dst_portid = 0;
Patrick McHardyd629b832005-08-14 19:27:50 -07001541 nlk->dst_group = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 return 0;
1543 }
1544 if (addr->sa_family != AF_NETLINK)
1545 return -EINVAL;
1546
Mike Pecovnik46833a82014-02-24 21:11:16 +01001547 if ((nladdr->nl_groups || nladdr->nl_pid) &&
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001548 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 return -EPERM;
1550
Eric W. Biederman15e47302012-09-07 20:12:54 +00001551 if (!nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 err = netlink_autobind(sock);
1553
1554 if (err == 0) {
1555 sk->sk_state = NETLINK_CONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001556 nlk->dst_portid = nladdr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001557 nlk->dst_group = ffs(nladdr->nl_groups);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 }
1559
1560 return err;
1561}
1562
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001563static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1564 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565{
1566 struct sock *sk = sock->sk;
1567 struct netlink_sock *nlk = nlk_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00001568 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001569
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 nladdr->nl_family = AF_NETLINK;
1571 nladdr->nl_pad = 0;
1572 *addr_len = sizeof(*nladdr);
1573
1574 if (peer) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001575 nladdr->nl_pid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001576 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001578 nladdr->nl_pid = nlk->portid;
Patrick McHardy513c2502005-09-06 15:43:59 -07001579 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 }
1581 return 0;
1582}
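/*
 * Userspace typically calls getsockname(2) after an autobind to learn
 * which portid the kernel picked (illustrative sketch):
 *
 *	struct sockaddr_nl self;
 *	socklen_t len = sizeof(self);
 *
 *	if (getsockname(fd, (struct sockaddr *)&self, &len) == 0)
 *		printf("bound to portid %u\n", self.nl_pid);
 */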
1583
Eric W. Biederman15e47302012-09-07 20:12:54 +00001584static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 struct sock *sock;
1587 struct netlink_sock *nlk;
1588
Eric W. Biederman15e47302012-09-07 20:12:54 +00001589 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 if (!sock)
1591 return ERR_PTR(-ECONNREFUSED);
1592
1593 /* Don't bother queuing skb if kernel socket has no input function */
1594 nlk = nlk_sk(sock);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001595 if (sock->sk_state == NETLINK_CONNECTED &&
Eric W. Biederman15e47302012-09-07 20:12:54 +00001596 nlk->dst_portid != nlk_sk(ssk)->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 sock_put(sock);
1598 return ERR_PTR(-ECONNREFUSED);
1599 }
1600 return sock;
1601}
1602
1603struct sock *netlink_getsockbyfilp(struct file *filp)
1604{
Al Viro496ad9a2013-01-23 17:07:38 -05001605 struct inode *inode = file_inode(filp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606 struct sock *sock;
1607
1608 if (!S_ISSOCK(inode->i_mode))
1609 return ERR_PTR(-ENOTSOCK);
1610
1611 sock = SOCKET_I(inode)->sk;
1612 if (sock->sk_family != AF_NETLINK)
1613 return ERR_PTR(-EINVAL);
1614
1615 sock_hold(sock);
1616 return sock;
1617}
1618
Pablo Neira3a365152013-06-28 03:04:23 +02001619static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1620 int broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001621{
1622 struct sk_buff *skb;
1623 void *data;
1624
Pablo Neira3a365152013-06-28 03:04:23 +02001625 if (size <= NLMSG_GOODSIZE || broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001626 return alloc_skb(size, GFP_KERNEL);
1627
Pablo Neira3a365152013-06-28 03:04:23 +02001628 size = SKB_DATA_ALIGN(size) +
1629 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001630
1631 data = vmalloc(size);
1632 if (data == NULL)
Pablo Neira3a365152013-06-28 03:04:23 +02001633 return NULL;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001634
Eric Dumazet2ea2f622015-04-24 16:05:01 -07001635 skb = __build_skb(data, size);
Pablo Neira3a365152013-06-28 03:04:23 +02001636 if (skb == NULL)
1637 vfree(data);
Eric Dumazet2ea2f622015-04-24 16:05:01 -07001638 else
Pablo Neira3a365152013-06-28 03:04:23 +02001639 skb->destructor = netlink_skb_destructor;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001640
1641 return skb;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001642}
1643
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644/*
1645 * Attach a skb to a netlink socket.
1646 * The caller must hold a reference to the destination socket. On error, the
1647 * reference is dropped. The skb is not sent to the destination; only
1648 * the error checks are performed and memory in the queue is reserved.
1649 * Return values:
1650 * < 0: error. skb freed, reference to sock dropped.
1651 * 0: continue
1652 * 1: repeat lookup - reference dropped while waiting for socket memory.
1653 */
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001654int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001655 long *timeo, struct sock *ssk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656{
1657 struct netlink_sock *nlk;
1658
1659 nlk = nlk_sk(sk);
1660
Patrick McHardy5fd96122013-04-17 06:47:03 +00001661 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001662 test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
Patrick McHardy5fd96122013-04-17 06:47:03 +00001663 !netlink_skb_is_mmaped(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 DECLARE_WAITQUEUE(wait, current);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001665 if (!*timeo) {
Denis V. Lunevaed81562007-10-10 21:14:32 -07001666 if (!ssk || netlink_is_kernel(ssk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 netlink_overrun(sk);
1668 sock_put(sk);
1669 kfree_skb(skb);
1670 return -EAGAIN;
1671 }
1672
1673 __set_current_state(TASK_INTERRUPTIBLE);
1674 add_wait_queue(&nlk->wait, &wait);
1675
1676 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001677 test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 !sock_flag(sk, SOCK_DEAD))
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001679 *timeo = schedule_timeout(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680
1681 __set_current_state(TASK_RUNNING);
1682 remove_wait_queue(&nlk->wait, &wait);
1683 sock_put(sk);
1684
1685 if (signal_pending(current)) {
1686 kfree_skb(skb);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001687 return sock_intr_errno(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 }
1689 return 1;
1690 }
Patrick McHardycf0a0182013-04-17 06:47:00 +00001691 netlink_skb_set_owner_r(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 return 0;
1693}
1694
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001695static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 int len = skb->len;
1698
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02001699 netlink_deliver_tap(skb);
1700
Patrick McHardyf9c22882013-04-17 06:47:04 +00001701#ifdef CONFIG_NETLINK_MMAP
1702 if (netlink_skb_is_mmaped(skb))
1703 netlink_queue_mmaped_skb(sk, skb);
1704 else if (netlink_rx_is_mmaped(sk))
1705 netlink_ring_set_copied(sk, skb);
1706 else
1707#endif /* CONFIG_NETLINK_MMAP */
1708 skb_queue_tail(&sk->sk_receive_queue, skb);
David S. Miller676d2362014-04-11 16:15:36 -04001709 sk->sk_data_ready(sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001710 return len;
1711}
1712
1713int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1714{
1715 int len = __netlink_sendskb(sk, skb);
1716
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 sock_put(sk);
1718 return len;
1719}
1720
1721void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1722{
1723 kfree_skb(skb);
1724 sock_put(sk);
1725}
1726
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001727static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728{
1729 int delta;
1730
Patrick McHardy1298ca42013-04-17 06:46:59 +00001731 WARN_ON(skb->sk != NULL);
Patrick McHardy5fd96122013-04-17 06:47:03 +00001732 if (netlink_skb_is_mmaped(skb))
1733 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001735 delta = skb->end - skb->tail;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001736 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 return skb;
1738
1739 if (skb_shared(skb)) {
1740 struct sk_buff *nskb = skb_clone(skb, allocation);
1741 if (!nskb)
1742 return skb;
Eric Dumazet8460c002012-04-19 02:24:28 +00001743 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 skb = nskb;
1745 }
1746
1747 if (!pskb_expand_head(skb, 0, -delta, allocation))
1748 skb->truesize -= delta;
1749
1750 return skb;
1751}
1752
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001753static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1754 struct sock *ssk)
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001755{
1756 int ret;
1757 struct netlink_sock *nlk = nlk_sk(sk);
1758
1759 ret = -ECONNREFUSED;
1760 if (nlk->netlink_rcv != NULL) {
1761 ret = skb->len;
Patrick McHardycf0a0182013-04-17 06:47:00 +00001762 netlink_skb_set_owner_r(skb, sk);
Patrick McHardye32123e2013-04-17 06:46:57 +00001763 NETLINK_CB(skb).sk = ssk;
Daniel Borkmann73bfd372013-12-23 14:35:55 +01001764 netlink_deliver_tap_kernel(sk, ssk, skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001765 nlk->netlink_rcv(skb);
Eric Dumazetbfb253c2012-04-22 21:30:29 +00001766 consume_skb(skb);
1767 } else {
1768 kfree_skb(skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001769 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001770 sock_put(sk);
1771 return ret;
1772}
1773
1774int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
Eric W. Biederman15e47302012-09-07 20:12:54 +00001775 u32 portid, int nonblock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776{
1777 struct sock *sk;
1778 int err;
1779 long timeo;
1780
1781 skb = netlink_trim(skb, gfp_any());
1782
1783 timeo = sock_sndtimeo(ssk, nonblock);
1784retry:
Eric W. Biederman15e47302012-09-07 20:12:54 +00001785 sk = netlink_getsockbyportid(ssk, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 if (IS_ERR(sk)) {
1787 kfree_skb(skb);
1788 return PTR_ERR(sk);
1789 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001790 if (netlink_is_kernel(sk))
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001791 return netlink_unicast_kernel(sk, skb, ssk);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001792
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001793 if (sk_filter(sk, skb)) {
Wang Chen84874602008-07-01 19:55:09 -07001794 err = skb->len;
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001795 kfree_skb(skb);
1796 sock_put(sk);
1797 return err;
1798 }
1799
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001800 err = netlink_attachskb(sk, skb, &timeo, ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 if (err == 1)
1802 goto retry;
1803 if (err)
1804 return err;
1805
Denis V. Lunev7ee015e2007-10-10 21:14:03 -07001806 return netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001808EXPORT_SYMBOL(netlink_unicast);
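/*
 * Typical use from a kernel-side input callback (sketch; "reply" is a
 * hypothetical skb built by the caller): answer the originator, whose
 * portid travels in the request's control block:
 *
 *	u32 portid = NETLINK_CB(skb).portid;
 *
 *	netlink_unicast(my_nl_sk, reply, portid, MSG_DONTWAIT);
 *
 * Note that netlink_unicast() consumes the skb on all paths, so the
 * caller must not free or reuse it afterwards.
 */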
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
Patrick McHardyf9c22882013-04-17 06:47:04 +00001810struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1811 u32 dst_portid, gfp_t gfp_mask)
1812{
1813#ifdef CONFIG_NETLINK_MMAP
1814 struct sock *sk = NULL;
1815 struct sk_buff *skb;
1816 struct netlink_ring *ring;
1817 struct nl_mmap_hdr *hdr;
1818 unsigned int maxlen;
1819
1820 sk = netlink_getsockbyportid(ssk, dst_portid);
1821 if (IS_ERR(sk))
1822 goto out;
1823
1824 ring = &nlk_sk(sk)->rx_ring;
1825 /* fast-path without atomic ops for common case: non-mmaped receiver */
1826 if (ring->pg_vec == NULL)
1827 goto out_put;
1828
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001829 if (ring->frame_size - NL_MMAP_HDRLEN < size)
1830 goto out_put;
1831
Patrick McHardyf9c22882013-04-17 06:47:04 +00001832 skb = alloc_skb_head(gfp_mask);
1833 if (skb == NULL)
1834 goto err1;
1835
1836 spin_lock_bh(&sk->sk_receive_queue.lock);
1837 /* check again under lock */
1838 if (ring->pg_vec == NULL)
1839 goto out_free;
1840
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001841	/* re-check the frame size under the lock */
Patrick McHardyf9c22882013-04-17 06:47:04 +00001842 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1843 if (maxlen < size)
1844 goto out_free;
1845
1846 netlink_forward_ring(ring);
1847 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1848 if (hdr == NULL)
1849 goto err2;
1850 netlink_ring_setup_skb(skb, sk, ring, hdr);
1851 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1852 atomic_inc(&ring->pending);
1853 netlink_increment_head(ring);
1854
1855 spin_unlock_bh(&sk->sk_receive_queue.lock);
1856 return skb;
1857
1858err2:
1859 kfree_skb(skb);
1860 spin_unlock_bh(&sk->sk_receive_queue.lock);
Patrick McHardycd1df522013-04-17 06:47:05 +00001861 netlink_overrun(sk);
Patrick McHardyf9c22882013-04-17 06:47:04 +00001862err1:
1863 sock_put(sk);
1864 return NULL;
1865
1866out_free:
1867 kfree_skb(skb);
1868 spin_unlock_bh(&sk->sk_receive_queue.lock);
1869out_put:
1870 sock_put(sk);
1871out:
1872#endif
1873 return alloc_skb(size, gfp_mask);
1874}
1875EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1876
Patrick McHardy4277a082006-03-20 18:52:01 -08001877int netlink_has_listeners(struct sock *sk, unsigned int group)
1878{
1879 int res = 0;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001880 struct listeners *listeners;
Patrick McHardy4277a082006-03-20 18:52:01 -08001881
Denis V. Lunevaed81562007-10-10 21:14:32 -07001882 BUG_ON(!netlink_is_kernel(sk));
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001883
1884 rcu_read_lock();
1885 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1886
Eric Dumazet6d772ac2012-10-18 03:21:55 +00001887 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001888 res = test_bit(group - 1, listeners->masks);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001889
1890 rcu_read_unlock();
1891
Patrick McHardy4277a082006-03-20 18:52:01 -08001892 return res;
1893}
1894EXPORT_SYMBOL_GPL(netlink_has_listeners);
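/*
 * Usage sketch ("my_nl_sk" and MY_GRP are assumptions): callers use
 * this to skip building a notification entirely when nobody has
 * subscribed, saving the allocation and formatting work:
 *
 *	if (!netlink_has_listeners(my_nl_sk, MY_GRP))
 *		return;
 *	skb = nlmsg_new(payload_size, GFP_KERNEL);
 */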
1895
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001896static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897{
1898 struct netlink_sock *nlk = nlk_sk(sk);
1899
1900 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001901 !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
Patrick McHardycf0a0182013-04-17 06:47:00 +00001902 netlink_skb_set_owner_r(skb, sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001903 __netlink_sendskb(sk, skb);
stephen hemminger2c6458002011-12-22 08:52:03 +00001904 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 }
1906 return -1;
1907}
1908
1909struct netlink_broadcast_data {
1910 struct sock *exclude_sk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001911 struct net *net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001912 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 u32 group;
1914 int failure;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08001915 int delivery_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 int congested;
1917 int delivered;
Al Viro7d877f32005-10-21 03:20:43 -04001918 gfp_t allocation;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 struct sk_buff *skb, *skb2;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001920 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1921 void *tx_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922};
1923
Rami Rosen46c95212014-07-01 21:17:35 +03001924static void do_one_broadcast(struct sock *sk,
1925 struct netlink_broadcast_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926{
1927 struct netlink_sock *nlk = nlk_sk(sk);
1928 int val;
1929
1930 if (p->exclude_sk == sk)
Rami Rosen46c95212014-07-01 21:17:35 +03001931 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932
Eric W. Biederman15e47302012-09-07 20:12:54 +00001933 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001934 !test_bit(p->group - 1, nlk->groups))
Rami Rosen46c95212014-07-01 21:17:35 +03001935 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02001937 if (!net_eq(sock_net(sk), p->net)) {
1938 if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
1939 return;
1940
1941 if (!peernet_has_id(sock_net(sk), p->net))
1942 return;
1943
1944 if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
1945 CAP_NET_BROADCAST))
1946 return;
1947 }
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001948
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 if (p->failure) {
1950 netlink_overrun(sk);
Rami Rosen46c95212014-07-01 21:17:35 +03001951 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 }
1953
1954 sock_hold(sk);
1955 if (p->skb2 == NULL) {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001956 if (skb_shared(p->skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 p->skb2 = skb_clone(p->skb, p->allocation);
1958 } else {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001959 p->skb2 = skb_get(p->skb);
1960 /*
1961 * skb ownership may have been set when
1962 * delivered to a previous socket.
1963 */
1964 skb_orphan(p->skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 }
1966 }
1967 if (p->skb2 == NULL) {
1968 netlink_overrun(sk);
1969 /* Clone failed. Notify ALL listeners. */
1970 p->failure = 1;
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001971 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00001972 p->delivery_failure = 1;
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02001973 goto out;
1974 }
1975 if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001976 kfree_skb(p->skb2);
1977 p->skb2 = NULL;
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02001978 goto out;
1979 }
1980 if (sk_filter(sk, p->skb2)) {
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001981 kfree_skb(p->skb2);
1982 p->skb2 = NULL;
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02001983 goto out;
1984 }
1985 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
1986 NETLINK_CB(p->skb2).nsid_is_set = true;
1987 val = netlink_broadcast_deliver(sk, p->skb2);
1988 if (val < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 netlink_overrun(sk);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001990 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00001991 p->delivery_failure = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 } else {
1993 p->congested |= val;
1994 p->delivered = 1;
1995 p->skb2 = NULL;
1996 }
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02001997out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999}
2000
Eric W. Biederman15e47302012-09-07 20:12:54 +00002001int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002002 u32 group, gfp_t allocation,
2003 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
2004 void *filter_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002006 struct net *net = sock_net(ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 struct netlink_broadcast_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 struct sock *sk;
2009
2010 skb = netlink_trim(skb, allocation);
2011
2012 info.exclude_sk = ssk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002013 info.net = net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002014 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 info.group = group;
2016 info.failure = 0;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002017 info.delivery_failure = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018 info.congested = 0;
2019 info.delivered = 0;
2020 info.allocation = allocation;
2021 info.skb = skb;
2022 info.skb2 = NULL;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002023 info.tx_filter = filter;
2024 info.tx_data = filter_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025
2026 /* While we sleep in clone, do not allow to change socket list */
2027
2028 netlink_lock_table();
2029
Sasha Levinb67bfe02013-02-27 17:06:00 -08002030 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 do_one_broadcast(sk, &info);
2032
Neil Horman70d4bf62010-07-20 06:45:56 +00002033 consume_skb(skb);
Tommy S. Christensenaa1c6a62005-05-19 13:07:32 -07002034
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 netlink_unlock_table();
2036
Neil Horman70d4bf62010-07-20 06:45:56 +00002037 if (info.delivery_failure) {
2038 kfree_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002039 return -ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002040 }
2041 consume_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002042
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 if (info.delivered) {
2044 if (info.congested && (allocation & __GFP_WAIT))
2045 yield();
2046 return 0;
2047 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 return -ESRCH;
2049}
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002050EXPORT_SYMBOL(netlink_broadcast_filtered);
2051
Eric W. Biederman15e47302012-09-07 20:12:54 +00002052int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002053 u32 group, gfp_t allocation)
2054{
Eric W. Biederman15e47302012-09-07 20:12:54 +00002055 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002056 NULL, NULL);
2057}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002058EXPORT_SYMBOL(netlink_broadcast);
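/*
 * Minimal usage sketch (MY_GRP is an assumption): portid 0 marks the
 * message as coming from the kernel, and -ESRCH merely means "no
 * current listeners", which callers usually do not treat as an error:
 *
 *	err = netlink_broadcast(my_nl_sk, skb, 0, MY_GRP, GFP_KERNEL);
 *	if (err && err != -ESRCH)
 *		pr_warn("notification dropped: %d\n", err);
 *
 * The skb is consumed in all cases.
 */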
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059
2060struct netlink_set_err_data {
2061 struct sock *exclude_sk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002062 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 u32 group;
2064 int code;
2065};
2066
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00002067static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068{
2069 struct netlink_sock *nlk = nlk_sk(sk);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002070 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071
2072 if (sk == p->exclude_sk)
2073 goto out;
2074
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08002075 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002076 goto out;
2077
Eric W. Biederman15e47302012-09-07 20:12:54 +00002078 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07002079 !test_bit(p->group - 1, nlk->groups))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 goto out;
2081
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002082 if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002083 ret = 1;
2084 goto out;
2085 }
2086
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 sk->sk_err = p->code;
2088 sk->sk_error_report(sk);
2089out:
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002090 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091}
2092
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002093/**
2094 * netlink_set_err - report error to broadcast listeners
2095 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
Eric W. Biederman15e47302012-09-07 20:12:54 +00002096 * @portid: the PORTID of a process that we want to skip (if any)
Johannes Berg840e93f22013-11-19 10:35:40 +01002097 * @group: the broadcast group that will notice the error
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002098 * @code: error code, must be negative (as usual in kernelspace)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002099 *
2100 * This function returns the number of broadcast listeners that have set the
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002101 * NETLINK_NO_ENOBUFS socket option.
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002102 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002103int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104{
2105 struct netlink_set_err_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 struct sock *sk;
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002107 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
2109 info.exclude_sk = ssk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002110 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 info.group = group;
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002112 /* sk->sk_err wants a positive error value */
2113 info.code = -code;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
2115 read_lock(&nl_table_lock);
2116
Sasha Levinb67bfe02013-02-27 17:06:00 -08002117 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002118 ret += do_one_set_err(sk, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119
2120 read_unlock(&nl_table_lock);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002121 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122}
Pablo Neira Ayusodd5b6ce2009-03-23 13:21:06 +01002123EXPORT_SYMBOL(netlink_set_err);
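/*
 * Sketch of a typical caller (MY_GRP is an assumption): after a failed
 * broadcast, report the loss to the listeners that asked for it:
 *
 *	err = netlink_broadcast(sk, skb, 0, MY_GRP, GFP_ATOMIC);
 *	if (err < 0)
 *		netlink_set_err(sk, 0, MY_GRP, -ENOBUFS);
 */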
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124
Johannes Berg84659eb2007-07-18 15:47:05 -07002125/* must be called with netlink table grabbed */
2126static void netlink_update_socket_mc(struct netlink_sock *nlk,
2127 unsigned int group,
2128 int is_new)
2129{
2130 int old, new = !!is_new, subscriptions;
2131
2132 old = test_bit(group - 1, nlk->groups);
2133 subscriptions = nlk->subscriptions - old + new;
2134 if (new)
2135 __set_bit(group - 1, nlk->groups);
2136 else
2137 __clear_bit(group - 1, nlk->groups);
2138 netlink_update_subscriptions(&nlk->sk, subscriptions);
2139 netlink_update_listeners(&nlk->sk);
2140}
2141
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002142static int netlink_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002143 char __user *optval, unsigned int optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002144{
2145 struct sock *sk = sock->sk;
2146 struct netlink_sock *nlk = nlk_sk(sk);
Johannes Bergeb496532007-07-18 02:07:51 -07002147 unsigned int val = 0;
2148 int err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002149
2150 if (level != SOL_NETLINK)
2151 return -ENOPROTOOPT;
2152
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002153 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2154 optlen >= sizeof(int) &&
Johannes Bergeb496532007-07-18 02:07:51 -07002155 get_user(val, (unsigned int __user *)optval))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002156 return -EFAULT;
2157
2158 switch (optname) {
2159 case NETLINK_PKTINFO:
2160 if (val)
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002161 nlk->flags |= NETLINK_F_RECV_PKTINFO;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002162 else
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002163 nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002164 err = 0;
2165 break;
2166 case NETLINK_ADD_MEMBERSHIP:
2167 case NETLINK_DROP_MEMBERSHIP: {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07002168 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002169 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002170 err = netlink_realloc_groups(sk);
2171 if (err)
2172 return err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002173 if (!val || val - 1 >= nlk->ngroups)
2174 return -EINVAL;
Richard Guy Briggs7774d5e2014-04-22 21:31:55 -04002175 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
Johannes Berg023e2cf2014-12-23 21:00:06 +01002176 err = nlk->netlink_bind(sock_net(sk), val);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04002177 if (err)
2178 return err;
2179 }
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002180 netlink_table_grab();
Johannes Berg84659eb2007-07-18 15:47:05 -07002181 netlink_update_socket_mc(nlk, val,
2182 optname == NETLINK_ADD_MEMBERSHIP);
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002183 netlink_table_ungrab();
Richard Guy Briggs7774d5e2014-04-22 21:31:55 -04002184 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
Johannes Berg023e2cf2014-12-23 21:00:06 +01002185 nlk->netlink_unbind(sock_net(sk), val);
Pablo Neira Ayuso03292742012-06-29 06:15:22 +00002186
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002187 err = 0;
2188 break;
2189 }
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002190 case NETLINK_BROADCAST_ERROR:
2191 if (val)
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002192 nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002193 else
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002194 nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002195 err = 0;
2196 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002197 case NETLINK_NO_ENOBUFS:
2198 if (val) {
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002199 nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
2200 clear_bit(NETLINK_S_CONGESTED, &nlk->state);
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002201 wake_up_interruptible(&nlk->wait);
Eric Dumazet658cb352012-04-22 21:30:21 +00002202 } else {
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002203 nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002204 }
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002205 err = 0;
2206 break;
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002207#ifdef CONFIG_NETLINK_MMAP
2208 case NETLINK_RX_RING:
2209 case NETLINK_TX_RING: {
2210 struct nl_mmap_req req;
2211
2212 /* Rings might consume more memory than queue limits, require
2213 * CAP_NET_ADMIN.
2214 */
2215 if (!capable(CAP_NET_ADMIN))
2216 return -EPERM;
2217 if (optlen < sizeof(req))
2218 return -EINVAL;
2219 if (copy_from_user(&req, optval, sizeof(req)))
2220 return -EFAULT;
2221 err = netlink_set_ring(sk, &req, false,
2222 optname == NETLINK_TX_RING);
2223 break;
2224 }
2225#endif /* CONFIG_NETLINK_MMAP */
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02002226 case NETLINK_LISTEN_ALL_NSID:
2227 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
2228 return -EPERM;
2229
2230 if (val)
2231 nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
2232 else
2233 nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
2234 err = 0;
2235 break;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002236 default:
2237 err = -ENOPROTOOPT;
2238 }
2239 return err;
2240}
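/*
 * The membership options above are what userspace reaches through
 * setsockopt(2); an illustrative call joining group 33, which cannot
 * be expressed in sockaddr_nl.nl_groups at bind time:
 *
 *	int grp = 33;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 */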
2241
2242static int netlink_getsockopt(struct socket *sock, int level, int optname,
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002243 char __user *optval, int __user *optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002244{
2245 struct sock *sk = sock->sk;
2246 struct netlink_sock *nlk = nlk_sk(sk);
2247 int len, val, err;
2248
2249 if (level != SOL_NETLINK)
2250 return -ENOPROTOOPT;
2251
2252 if (get_user(len, optlen))
2253 return -EFAULT;
2254 if (len < 0)
2255 return -EINVAL;
2256
2257 switch (optname) {
2258 case NETLINK_PKTINFO:
2259 if (len < sizeof(int))
2260 return -EINVAL;
2261 len = sizeof(int);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002262 val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
Heiko Carstensa27b58f2006-10-30 15:06:12 -08002263 if (put_user(len, optlen) ||
2264 put_user(val, optval))
2265 return -EFAULT;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002266 err = 0;
2267 break;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002268 case NETLINK_BROADCAST_ERROR:
2269 if (len < sizeof(int))
2270 return -EINVAL;
2271 len = sizeof(int);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002272 val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002273 if (put_user(len, optlen) ||
2274 put_user(val, optval))
2275 return -EFAULT;
2276 err = 0;
2277 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002278 case NETLINK_NO_ENOBUFS:
2279 if (len < sizeof(int))
2280 return -EINVAL;
2281 len = sizeof(int);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002282 val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002283 if (put_user(len, optlen) ||
2284 put_user(val, optval))
2285 return -EFAULT;
2286 err = 0;
2287 break;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002288 default:
2289 err = -ENOPROTOOPT;
2290 }
2291 return err;
2292}
2293
2294static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2295{
2296 struct nl_pktinfo info;
2297
2298 info.group = NETLINK_CB(skb).dst_group;
2299 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2300}
2301
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02002302static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
2303 struct sk_buff *skb)
2304{
2305 if (!NETLINK_CB(skb).nsid_is_set)
2306 return;
2307
2308 put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
2309 &NETLINK_CB(skb).nsid);
2310}
2311
Ying Xue1b784142015-03-02 15:37:48 +08002312static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 struct sock *sk = sock->sk;
2315 struct netlink_sock *nlk = nlk_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002316 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002317 u32 dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002318 u32 dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 struct sk_buff *skb;
2320 int err;
2321 struct scm_cookie scm;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002322 u32 netlink_skb_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323
2324 if (msg->msg_flags&MSG_OOB)
2325 return -EOPNOTSUPP;
2326
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002327 err = scm_send(sock, msg, &scm, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 if (err < 0)
2329 return err;
2330
2331 if (msg->msg_namelen) {
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002332 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333 if (addr->nl_family != AF_NETLINK)
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002334 goto out;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002335 dst_portid = addr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002336 dst_group = ffs(addr->nl_groups);
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002337 err = -EPERM;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002338 if ((dst_group || dst_portid) &&
Eric W. Biederman5187cd02014-04-23 14:25:48 -07002339 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002340 goto out;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002341 netlink_skb_flags |= NETLINK_SKB_DST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002343 dst_portid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002344 dst_group = nlk->dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345 }
2346
Eric W. Biederman15e47302012-09-07 20:12:54 +00002347 if (!nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 err = netlink_autobind(sock);
2349 if (err)
2350 goto out;
2351 }
2352
Al Viroa8866ff2014-12-12 23:02:36 -05002353 /* It's a really convoluted way for userland to ask for mmaped
2354 * sendmsg(), but that's what we've got...
2355 */
Patrick McHardy5fd96122013-04-17 06:47:03 +00002356 if (netlink_tx_is_mmaped(sk) &&
Al Viroa8866ff2014-12-12 23:02:36 -05002357 msg->msg_iter.type == ITER_IOVEC &&
2358 msg->msg_iter.nr_segs == 1 &&
Al Viroc0371da2014-11-24 10:42:55 -05002359 msg->msg_iter.iov->iov_base == NULL) {
Patrick McHardy5fd96122013-04-17 06:47:03 +00002360 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002361 &scm);
Patrick McHardy5fd96122013-04-17 06:47:03 +00002362 goto out;
2363 }
2364
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 err = -EMSGSIZE;
2366 if (len > sk->sk_sndbuf - 32)
2367 goto out;
2368 err = -ENOBUFS;
Pablo Neira3a365152013-06-28 03:04:23 +02002369 skb = netlink_alloc_large_skb(len, dst_group);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002370 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 goto out;
2372
Eric W. Biederman15e47302012-09-07 20:12:54 +00002373 NETLINK_CB(skb).portid = nlk->portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002374 NETLINK_CB(skb).dst_group = dst_group;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002375 NETLINK_CB(skb).creds = scm.creds;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002376 NETLINK_CB(skb).flags = netlink_skb_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 err = -EFAULT;
Al Viro6ce8e9c2014-04-06 21:25:44 -04002379 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380 kfree_skb(skb);
2381 goto out;
2382 }
2383
2384 err = security_netlink_send(sk, skb);
2385 if (err) {
2386 kfree_skb(skb);
2387 goto out;
2388 }
2389
Patrick McHardyd629b832005-08-14 19:27:50 -07002390 if (dst_group) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002392 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 }
Eric W. Biederman15e47302012-09-07 20:12:54 +00002394 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395
2396out:
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002397 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398 return err;
2399}
2400
Ying Xue1b784142015-03-02 15:37:48 +08002401static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402 int flags)
2403{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 struct scm_cookie scm;
2405 struct sock *sk = sock->sk;
2406 struct netlink_sock *nlk = nlk_sk(sk);
2407 int noblock = flags&MSG_DONTWAIT;
2408 size_t copied;
Johannes Berg68d6ac62010-08-15 21:20:44 +00002409 struct sk_buff *skb, *data_skb;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002410 int err, ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411
2412 if (flags&MSG_OOB)
2413 return -EOPNOTSUPP;
2414
2415 copied = 0;
2416
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002417 skb = skb_recv_datagram(sk, flags, noblock, &err);
2418 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 goto out;
2420
Johannes Berg68d6ac62010-08-15 21:20:44 +00002421 data_skb = skb;
2422
Johannes Berg1dacc762009-07-01 11:26:02 +00002423#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2424 if (unlikely(skb_shinfo(skb)->frag_list)) {
Johannes Berg1dacc762009-07-01 11:26:02 +00002425 /*
Johannes Berg68d6ac62010-08-15 21:20:44 +00002426	 * If this skb has a frag_list, it means that we
2427 * will have to use the frag_list skb's data for compat tasks
2428 * and the regular skb's data for normal (non-compat) tasks.
Johannes Berg1dacc762009-07-01 11:26:02 +00002429 *
Johannes Berg68d6ac62010-08-15 21:20:44 +00002430 * If we need to send the compat skb, assign it to the
2431 * 'data_skb' variable so that it will be used below for data
2432 * copying. We keep 'skb' for everything else, including
2433 * freeing both later.
Johannes Berg1dacc762009-07-01 11:26:02 +00002434 */
Johannes Berg68d6ac62010-08-15 21:20:44 +00002435 if (flags & MSG_CMSG_COMPAT)
2436 data_skb = skb_shinfo(skb)->frag_list;
Johannes Berg1dacc762009-07-01 11:26:02 +00002437 }
2438#endif
2439
Eric Dumazet9063e212014-03-07 12:02:33 -08002440 /* Record the max length of recvmsg() calls for future allocations */
2441 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2442 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2443 16384);
2444
Johannes Berg68d6ac62010-08-15 21:20:44 +00002445 copied = data_skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 if (len < copied) {
2447 msg->msg_flags |= MSG_TRUNC;
2448 copied = len;
2449 }
2450
Johannes Berg68d6ac62010-08-15 21:20:44 +00002451 skb_reset_transport_header(data_skb);
David S. Miller51f3d022014-11-05 16:46:40 -05002452 err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453
2454 if (msg->msg_name) {
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002455 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 addr->nl_family = AF_NETLINK;
2457 addr->nl_pad = 0;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002458 addr->nl_pid = NETLINK_CB(skb).portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002459 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 msg->msg_namelen = sizeof(*addr);
2461 }
2462
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002463 if (nlk->flags & NETLINK_F_RECV_PKTINFO)
Patrick McHardycc9a06c2006-03-12 20:34:27 -08002464 netlink_cmsg_recv_pktinfo(msg, skb);
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02002465 if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
2466 netlink_cmsg_listen_all_nsid(sk, msg, skb);
Patrick McHardycc9a06c2006-03-12 20:34:27 -08002467
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002468 memset(&scm, 0, sizeof(scm));
2469 scm.creds = *NETLINK_CREDS(skb);
Patrick McHardy188ccb52007-05-03 03:27:01 -07002470 if (flags & MSG_TRUNC)
Johannes Berg68d6ac62010-08-15 21:20:44 +00002471 copied = data_skb->len;
David S. Millerdaa37662010-08-15 23:21:50 -07002472
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473 skb_free_datagram(sk, skb);
2474
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002475 if (nlk->cb_running &&
2476 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
Andrey Vaginb44d2112011-02-21 02:40:47 +00002477 ret = netlink_dump(sk);
2478 if (ret) {
Ben Pfaffac30ef82014-07-09 10:31:22 -07002479 sk->sk_err = -ret;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002480 sk->sk_error_report(sk);
2481 }
2482 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002484 scm_recv(sock, msg, &scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485out:
2486 netlink_rcv_wake(sk);
2487 return err ? : copied;
2488}
2489
David S. Miller676d2362014-04-11 16:15:36 -04002490static void netlink_data_ready(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491{
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002492 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493}
2494
2495/*
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002496 * We export these functions to other modules. They provide a
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 * complete set of kernel non-blocking support for message
2498 * queueing.
2499 */
2500
2501struct sock *
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002502__netlink_kernel_create(struct net *net, int unit, struct module *module,
2503 struct netlink_kernel_cfg *cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504{
2505 struct socket *sock;
2506 struct sock *sk;
Patrick McHardy77247bb2005-08-14 19:27:13 -07002507 struct netlink_sock *nlk;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002508 struct listeners *listeners = NULL;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002509 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2510 unsigned int groups;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511
Akinobu Mitafab2caf2006-08-29 02:15:24 -07002512 BUG_ON(!nl_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002514 if (unit < 0 || unit >= MAX_LINKS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 return NULL;
2516
2517 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2518 return NULL;
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002519 /*
2520 * We have to just have a reference on the net from sk, but don't
2521 * get_net it. Besides, we cannot get and then put the net here.
2522 * So we create one inside init_net and the move it to net.
2523 */
Eric W. Biederman11aa9c22015-05-08 21:09:13 -05002524 if (__netlink_create(&init_net, sock, cb_mutex, unit, 0) < 0)
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002525 goto out_sock_release_nosk;
2526
2527 sk = sock->sk;
Denis V. Lunevedf02082008-02-29 11:18:32 -08002528 sk_change_net(sk, net);
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002529
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002530 if (!cfg || cfg->groups < 32)
Patrick McHardy4277a082006-03-20 18:52:01 -08002531 groups = 32;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002532 else
2533 groups = cfg->groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08002534
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002535 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
Patrick McHardy4277a082006-03-20 18:52:01 -08002536 if (!listeners)
2537 goto out_sock_release;
2538
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 sk->sk_data_ready = netlink_data_ready;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002540 if (cfg && cfg->input)
2541 nlk_sk(sk)->netlink_rcv = cfg->input;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542
Herbert Xu8ea65f42015-01-26 14:02:56 +11002543 if (netlink_insert(sk, 0))
Patrick McHardy77247bb2005-08-14 19:27:13 -07002544 goto out_sock_release;
2545
2546 nlk = nlk_sk(sk);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002547 nlk->flags |= NETLINK_F_KERNEL_SOCKET;
Patrick McHardy77247bb2005-08-14 19:27:13 -07002548
2549 netlink_table_grab();
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002550 if (!nl_table[unit].registered) {
2551 nl_table[unit].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002552 rcu_assign_pointer(nl_table[unit].listeners, listeners);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002553 nl_table[unit].cb_mutex = cb_mutex;
2554 nl_table[unit].module = module;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002555 if (cfg) {
2556 nl_table[unit].bind = cfg->bind;
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09002557 nl_table[unit].unbind = cfg->unbind;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002558 nl_table[unit].flags = cfg->flags;
Gao fengda12c902013-06-06 14:49:11 +08002559 if (cfg->compare)
2560 nl_table[unit].compare = cfg->compare;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002561 }
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002562 nl_table[unit].registered = 1;
Jesper Juhlf937f1f462007-10-15 01:39:12 -07002563 } else {
2564 kfree(listeners);
Denis V. Lunev869e58f2008-01-18 23:53:31 -08002565 nl_table[unit].registered++;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002566 }
Patrick McHardy77247bb2005-08-14 19:27:13 -07002567 netlink_table_ungrab();
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002568 return sk;
2569
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002570out_sock_release:
Patrick McHardy4277a082006-03-20 18:52:01 -08002571 kfree(listeners);
Denis V. Lunev9dfbec12008-02-29 11:17:56 -08002572 netlink_kernel_release(sk);
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002573 return NULL;
2574
2575out_sock_release_nosk:
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002576 sock_release(sock);
Patrick McHardy77247bb2005-08-14 19:27:13 -07002577 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578}
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002579EXPORT_SYMBOL(__netlink_kernel_create);
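/*
 * Illustrative registration sketch (protocol choice and callback are
 * assumptions); most callers use the netlink_kernel_create() wrapper,
 * which supplies THIS_MODULE for them:
 *
 *	static struct netlink_kernel_cfg cfg = {
 *		.groups = 32,
 *		.input  = my_nl_rcv,
 *	};
 *
 *	nl_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
 *	if (!nl_sk)
 *		return -ENOMEM;
 */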
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002580
2581void
2582netlink_kernel_release(struct sock *sk)
2583{
Denis V. Lunevedf02082008-02-29 11:18:32 -08002584 sk_release_kernel(sk);
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002585}
2586EXPORT_SYMBOL(netlink_kernel_release);
2587
Johannes Bergd136f1b2009-09-12 03:03:15 +00002588int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002589{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002590 struct listeners *new, *old;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002591 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002592
2593 if (groups < 32)
2594 groups = 32;
2595
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002596 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002597 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2598 if (!new)
Johannes Bergd136f1b2009-09-12 03:03:15 +00002599 return -ENOMEM;
Eric Dumazet6d772ac2012-10-18 03:21:55 +00002600 old = nl_deref_protected(tbl->listeners);
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002601 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2602 rcu_assign_pointer(tbl->listeners, new);
2603
Lai Jiangshan37b6b932011-03-15 18:01:42 +08002604 kfree_rcu(old, rcu);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002605 }
2606 tbl->groups = groups;
2607
Johannes Bergd136f1b2009-09-12 03:03:15 +00002608 return 0;
2609}
2610
2611/**
2612 * netlink_change_ngroups - change number of multicast groups
2613 *
2614 * This changes the number of multicast groups that are available
2615 * on a certain netlink family. Note that it is not possible to
2616 * change the number of groups to below 32. Also note that it does
2617 * not implicitly call netlink_clear_multicast_users() when the
2618 * number of groups is reduced.
2619 *
2620 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2621 * @groups: The new number of groups.
2622 */
2623int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2624{
2625 int err;
2626
2627 netlink_table_grab();
2628 err = __netlink_change_ngroups(sk, groups);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002629 netlink_table_ungrab();
Johannes Bergd136f1b2009-09-12 03:03:15 +00002630
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002631 return err;
2632}
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002633
Johannes Bergb8273572009-09-24 15:44:05 -07002634void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2635{
2636 struct sock *sk;
Johannes Bergb8273572009-09-24 15:44:05 -07002637 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2638
Sasha Levinb67bfe02013-02-27 17:06:00 -08002639 sk_for_each_bound(sk, &tbl->mc_list)
Johannes Bergb8273572009-09-24 15:44:05 -07002640 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2641}
2642
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002643struct nlmsghdr *
Eric W. Biederman15e47302012-09-07 20:12:54 +00002644__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002645{
2646 struct nlmsghdr *nlh;
Hong zhi guo573ce262013-03-27 06:47:04 +00002647 int size = nlmsg_msg_size(len);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002648
Wang Yufen23b45672014-02-17 16:53:32 +08002649 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002650 nlh->nlmsg_type = type;
2651 nlh->nlmsg_len = size;
2652 nlh->nlmsg_flags = flags;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002653 nlh->nlmsg_pid = portid;
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002654 nlh->nlmsg_seq = seq;
2655 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
Hong zhi guo573ce262013-03-27 06:47:04 +00002656 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002657 return nlh;
2658}
2659EXPORT_SYMBOL(__nlmsg_put);
2660
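/*
 * Sketch of the usual caller side: nlmsg_put() from <net/netlink.h> checks
 * tailroom and then relies on __nlmsg_put() above to fill in the header.
 * MY_MSG_TYPE and 'value' are hypothetical.
 */
#if 0
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE, sizeof(u32), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	*(u32 *)nlmsg_data(nlh) = value;
	nlmsg_end(skb, nlh);
#endif
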
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661/*
2662 * It looks a bit ugly.
2663 * It would be better to create a kernel thread.
2664 */
2665
2666static int netlink_dump(struct sock *sk)
2667{
2668 struct netlink_sock *nlk = nlk_sk(sk);
2669 struct netlink_callback *cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002670 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 struct nlmsghdr *nlh;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002672 int len, err = -ENOBUFS;
Greg Rosec7ac8672011-06-10 01:27:09 +00002673 int alloc_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002675 mutex_lock(nlk->cb_mutex);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002676 if (!nlk->cb_running) {
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002677 err = -EINVAL;
2678 goto errout_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 }
2680
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002681 cb = &nlk->cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002682 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2683
Patrick McHardyf9c22882013-04-17 06:47:04 +00002684 if (!netlink_rx_is_mmaped(sk) &&
2685 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2686 goto errout_skb;
Eric Dumazet9063e212014-03-07 12:02:33 -08002687
2688	/* NLMSG_GOODSIZE is small to avoid high order allocations being
2689	 * required, but it makes sense to _attempt_ a 16KB allocation
2690	 * to reduce the number of system calls on dump operations, if the
2691	 * user ever provided a big enough buffer.
2692 */
2693 if (alloc_size < nlk->max_recvmsg_len) {
2694 skb = netlink_alloc_skb(sk,
2695 nlk->max_recvmsg_len,
2696 nlk->portid,
2697 GFP_KERNEL |
2698 __GFP_NOWARN |
2699 __GFP_NORETRY);
2700		/* available room should be the exact amount to avoid MSG_TRUNC */
2701 if (skb)
2702 skb_reserve(skb, skb_tailroom(skb) -
2703 nlk->max_recvmsg_len);
2704 }
2705 if (!skb)
2706 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2707 GFP_KERNEL);
Greg Rosec7ac8672011-06-10 01:27:09 +00002708 if (!skb)
Dan Carpenterc63d6ea2011-06-15 03:11:42 +00002709 goto errout_skb;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002710 netlink_skb_set_owner_r(skb, sk);
Greg Rosec7ac8672011-06-10 01:27:09 +00002711
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 len = cb->dump(skb, cb);
2713
2714 if (len > 0) {
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002715 mutex_unlock(nlk->cb_mutex);
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002716
2717 if (sk_filter(sk, skb))
2718 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002719 else
2720 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 return 0;
2722 }
2723
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002724 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2725 if (!nlh)
2726 goto errout_skb;
2727
Johannes Berg670dc282011-06-20 13:40:46 +02002728 nl_dump_check_consistent(cb, nlh);
2729
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002730 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2731
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002732 if (sk_filter(sk, skb))
2733 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002734 else
2735 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736
Thomas Grafa8f74b22005-11-10 02:25:52 +01002737 if (cb->done)
2738 cb->done(cb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002740 nlk->cb_running = false;
2741 mutex_unlock(nlk->cb_mutex);
Gao feng6dc878a2012-10-04 20:15:48 +00002742 module_put(cb->module);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002743 consume_skb(cb->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744 return 0;
Thomas Graf17977542005-06-18 22:53:48 -07002745
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002746errout_skb:
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002747 mutex_unlock(nlk->cb_mutex);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002748 kfree_skb(skb);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002749 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750}
2751
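/*
 * Sketch of the cb->dump() contract that netlink_dump() drives: return a
 * positive length while more data remains (the core queues the skb and
 * calls back on the next recvmsg()) and 0 once everything has been dumped,
 * after which NLMSG_DONE is appended.  cb->args[] carries resume state;
 * my_obj_count and my_obj_fill() are hypothetical.
 */
#if 0
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;

	for (idx = cb->args[0]; idx < my_obj_count; idx++)
		if (my_obj_fill(skb, cb, idx) < 0)
			break;			/* skb full, resume here later */

	cb->args[0] = idx;
	return skb->len;	/* > 0: call again; 0: dump finished */
}
#endif
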
Gao feng6dc878a2012-10-04 20:15:48 +00002752int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2753 const struct nlmsghdr *nlh,
2754 struct netlink_dump_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755{
2756 struct netlink_callback *cb;
2757 struct sock *sk;
2758 struct netlink_sock *nlk;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002759 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760
Patrick McHardyf9c22882013-04-17 06:47:04 +00002761 /* Memory mapped dump requests need to be copied to avoid looping
2762	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2763	 * a reference to the skb.
2764 */
2765 if (netlink_skb_is_mmaped(skb)) {
2766 skb = skb_copy(skb, GFP_KERNEL);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002767 if (skb == NULL)
Patrick McHardyf9c22882013-04-17 06:47:04 +00002768 return -ENOBUFS;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002769	} else {
2770		atomic_inc(&skb->users);
	}
2771
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002772 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2773 if (sk == NULL) {
2774 ret = -ECONNREFUSED;
2775 goto error_free;
2776 }
2777
2778 nlk = nlk_sk(sk);
2779 mutex_lock(nlk->cb_mutex);
2780 /* A dump is in progress... */
2781 if (nlk->cb_running) {
2782 ret = -EBUSY;
2783 goto error_unlock;
2784 }
2785	/* take a reference on the module which cb->dump belongs to */
2786 if (!try_module_get(control->module)) {
2787 ret = -EPROTONOSUPPORT;
2788 goto error_unlock;
2789 }
2790
2791 cb = &nlk->cb;
2792 memset(cb, 0, sizeof(*cb));
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002793 cb->dump = control->dump;
2794 cb->done = control->done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 cb->nlh = nlh;
Pablo Neira Ayuso7175c882012-02-24 14:30:16 +00002796 cb->data = control->data;
Gao feng6dc878a2012-10-04 20:15:48 +00002797 cb->module = control->module;
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002798 cb->min_dump_alloc = control->min_dump_alloc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 cb->skb = skb;
2800
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002801 nlk->cb_running = true;
Gao feng6dc878a2012-10-04 20:15:48 +00002802
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002803 mutex_unlock(nlk->cb_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
Andrey Vaginb44d2112011-02-21 02:40:47 +00002805 ret = netlink_dump(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806 sock_put(sk);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002807
Andrey Vaginb44d2112011-02-21 02:40:47 +00002808 if (ret)
2809 return ret;
2810
Denis V. Lunev5c582982007-10-23 20:29:25 -07002811	/* We successfully started a dump; by returning -EINTR we
2812	 * signal that no ACK should be sent even if it was requested.
2813 */
2814 return -EINTR;
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002815
2816error_unlock:
2817 sock_put(sk);
2818 mutex_unlock(nlk->cb_mutex);
2819error_free:
2820 kfree_skb(skb);
2821 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822}
Gao feng6dc878a2012-10-04 20:15:48 +00002823EXPORT_SYMBOL(__netlink_dump_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824
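/*
 * Sketch of the caller side, modelled on how rtnetlink handles NLM_F_DUMP
 * requests: netlink_dump_start() is the <linux/netlink.h> wrapper that
 * fills in .module = THIS_MODULE.  ksk is the kernel socket; my_dump() is
 * the hypothetical callback sketched above.
 */
#if 0
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = my_dump,
		};

		/* -EINTR here means the dump started and no ACK is sent */
		return netlink_dump_start(ksk, skb, nlh, &c);
	}
#endif
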
2825void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2826{
2827 struct sk_buff *skb;
2828 struct nlmsghdr *rep;
2829 struct nlmsgerr *errmsg;
Thomas Graf339bf982006-11-10 14:10:15 -08002830 size_t payload = sizeof(*errmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831
Thomas Graf339bf982006-11-10 14:10:15 -08002832	/* error messages get the original request appended */
2833 if (err)
2834 payload += nlmsg_len(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835
Patrick McHardyf9c22882013-04-17 06:47:04 +00002836 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2837 NETLINK_CB(in_skb).portid, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 if (!skb) {
2839 struct sock *sk;
2840
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002841 sk = netlink_lookup(sock_net(in_skb->sk),
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002842 in_skb->sk->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002843 NETLINK_CB(in_skb).portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 if (sk) {
2845 sk->sk_err = ENOBUFS;
2846 sk->sk_error_report(sk);
2847 sock_put(sk);
2848 }
2849 return;
2850 }
2851
Eric W. Biederman15e47302012-09-07 20:12:54 +00002852 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
John Fastabend5dba93a2009-09-25 13:11:44 +00002853 NLMSG_ERROR, payload, 0);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002854 errmsg = nlmsg_data(rep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 errmsg->error = err;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002856 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
Eric W. Biederman15e47302012-09-07 20:12:54 +00002857 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002859EXPORT_SYMBOL(netlink_ack);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860
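/*
 * What netlink_ack() puts on the wire, as a userspace reader would parse
 * it: an NLMSG_ERROR message whose nlmsgerr payload carries the negative
 * errno (0 for a plain ACK) followed by the offending request.
 * handle_failure() is hypothetical.
 */
#if 0
	if (nlh->nlmsg_type == NLMSG_ERROR) {
		struct nlmsgerr *e = (struct nlmsgerr *)NLMSG_DATA(nlh);

		if (e->error)
			handle_failure(-e->error, &e->msg);
		/* e->error == 0 acknowledges request e->msg.nlmsg_seq */
	}
#endif
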
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002861int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002862 struct nlmsghdr *))
Thomas Graf82ace472005-11-10 02:25:53 +01002863{
Thomas Graf82ace472005-11-10 02:25:53 +01002864 struct nlmsghdr *nlh;
2865 int err;
2866
2867 while (skb->len >= nlmsg_total_size(0)) {
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002868 int msglen;
2869
Arnaldo Carvalho de Melob529ccf2007-04-25 19:08:35 -07002870 nlh = nlmsg_hdr(skb);
Thomas Grafd35b6852007-03-22 23:28:46 -07002871 err = 0;
Thomas Graf82ace472005-11-10 02:25:53 +01002872
Martin Murrayad8e4b72006-01-10 13:02:29 -08002873 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
Thomas Graf82ace472005-11-10 02:25:53 +01002874 return 0;
2875
Thomas Grafd35b6852007-03-22 23:28:46 -07002876 /* Only requests are handled by the kernel */
2877 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
Denis V. Lunev5c582982007-10-23 20:29:25 -07002878 goto ack;
Thomas Grafd35b6852007-03-22 23:28:46 -07002879
Thomas Graf45e7ae72007-03-22 23:29:10 -07002880 /* Skip control messages */
2881 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
Denis V. Lunev5c582982007-10-23 20:29:25 -07002882 goto ack;
Thomas Graf45e7ae72007-03-22 23:29:10 -07002883
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002884 err = cb(skb, nlh);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002885 if (err == -EINTR)
2886 goto skip;
2887
2888ack:
Thomas Grafd35b6852007-03-22 23:28:46 -07002889 if (nlh->nlmsg_flags & NLM_F_ACK || err)
Thomas Graf82ace472005-11-10 02:25:53 +01002890 netlink_ack(skb, nlh, err);
Thomas Graf82ace472005-11-10 02:25:53 +01002891
Denis V. Lunev5c582982007-10-23 20:29:25 -07002892skip:
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002893 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002894 if (msglen > skb->len)
2895 msglen = skb->len;
2896 skb_pull(skb, msglen);
Thomas Graf82ace472005-11-10 02:25:53 +01002897 }
2898
2899 return 0;
2900}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002901EXPORT_SYMBOL(netlink_rcv_skb);
Thomas Graf82ace472005-11-10 02:25:53 +01002902
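/*
 * Typical pairing for netlink_rcv_skb(): the kernel socket's input
 * callback hands each queued skb over, and one handler is invoked per
 * request message in the batch.  The my_* names are hypothetical.
 */
#if 0
static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* dispatch on nlh->nlmsg_type; a negative return becomes the ack */
	return 0;
}

static void my_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &my_rcv_msg);
}
#endif
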
2903/**
Thomas Grafd387f6a2006-08-15 00:31:06 -07002904 * nlmsg_notify - send a notification netlink message
2905 * @sk: netlink socket to use
2906 * @skb: notification message
Eric W. Biederman15e47302012-09-07 20:12:54 +00002907 * @portid: destination netlink portid for reports or 0
Thomas Grafd387f6a2006-08-15 00:31:06 -07002908 * @group: destination multicast group or 0
2909 * @report: 1 to report back, 0 to disable
2910 * @flags: allocation flags
2911 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002912int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
Thomas Grafd387f6a2006-08-15 00:31:06 -07002913 unsigned int group, int report, gfp_t flags)
2914{
2915 int err = 0;
2916
2917 if (group) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002918 int exclude_portid = 0;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002919
2920 if (report) {
2921 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002922 exclude_portid = portid;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002923 }
2924
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002925		/* errors are reported via destination sk->sk_err, but delivery
2926		 * errors are propagated if NETLINK_BROADCAST_ERROR is set */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002927 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
Thomas Grafd387f6a2006-08-15 00:31:06 -07002928 }
2929
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002930 if (report) {
2931 int err2;
2932
Eric W. Biederman15e47302012-09-07 20:12:54 +00002933 err2 = nlmsg_unicast(sk, skb, portid);
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002934 if (!err || err == -ESRCH)
2935 err = err2;
2936 }
Thomas Grafd387f6a2006-08-15 00:31:06 -07002937
2938 return err;
2939}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002940EXPORT_SYMBOL(nlmsg_notify);
Thomas Grafd387f6a2006-08-15 00:31:06 -07002941
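/*
 * Sketch of the usual notification pattern, modelled on rtnl_notify():
 * multicast to the group and, when the request carried NLM_F_ECHO, also
 * report back to the sender.  ksk, request_skb, notify_skb and MY_GROUP
 * are stand-in names.
 */
#if 0
	u32 portid = NETLINK_CB(request_skb).portid;
	int report = nlmsg_report(nlh);		/* NLM_F_ECHO requested? */

	err = nlmsg_notify(ksk, notify_skb, portid, MY_GROUP, report,
			   GFP_KERNEL);
#endif
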
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942#ifdef CONFIG_PROC_FS
2943struct nl_seq_iter {
Denis V. Luneve372c412007-11-19 22:31:54 -08002944 struct seq_net_private p;
Herbert Xu56d28b12015-02-04 07:33:24 +11002945 struct rhashtable_iter hti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946 int link;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947};
2948
Herbert Xu56d28b12015-02-04 07:33:24 +11002949static int netlink_walk_start(struct nl_seq_iter *iter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950{
Herbert Xu56d28b12015-02-04 07:33:24 +11002951 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952
Herbert Xu56d28b12015-02-04 07:33:24 +11002953 err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
2954 if (err) {
2955 iter->link = MAX_LINKS;
2956 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 }
Herbert Xu56d28b12015-02-04 07:33:24 +11002958
2959 err = rhashtable_walk_start(&iter->hti);
2960 return err == -EAGAIN ? 0 : err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961}
2962
Herbert Xu56d28b12015-02-04 07:33:24 +11002963static void netlink_walk_stop(struct nl_seq_iter *iter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964{
Herbert Xu56d28b12015-02-04 07:33:24 +11002965 rhashtable_walk_stop(&iter->hti);
2966 rhashtable_walk_exit(&iter->hti);
2967}
2968
2969static void *__netlink_seq_next(struct seq_file *seq)
2970{
2971 struct nl_seq_iter *iter = seq->private;
2972 struct netlink_sock *nlk;
2973
2974 do {
2975 for (;;) {
2976 int err;
2977
2978 nlk = rhashtable_walk_next(&iter->hti);
2979
2980 if (IS_ERR(nlk)) {
2981 if (PTR_ERR(nlk) == -EAGAIN)
2982 continue;
2983
2984 return nlk;
2985 }
2986
2987 if (nlk)
2988 break;
2989
2990 netlink_walk_stop(iter);
2991 if (++iter->link >= MAX_LINKS)
2992 return NULL;
2993
2994 err = netlink_walk_start(iter);
2995 if (err)
2996 return ERR_PTR(err);
2997 }
2998 } while (sock_net(&nlk->sk) != seq_file_net(seq));
2999
3000 return nlk;
3001}
3002
3003static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
3004{
3005 struct nl_seq_iter *iter = seq->private;
3006 void *obj = SEQ_START_TOKEN;
3007 loff_t pos;
3008 int err;
3009
3010 iter->link = 0;
3011
3012 err = netlink_walk_start(iter);
3013 if (err)
3014 return ERR_PTR(err);
3015
3016 for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
3017 obj = __netlink_seq_next(seq);
3018
3019 return obj;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020}
3021
3022static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3023{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024 ++*pos;
Herbert Xu56d28b12015-02-04 07:33:24 +11003025 return __netlink_seq_next(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026}
3027
3028static void netlink_seq_stop(struct seq_file *seq, void *v)
3029{
Herbert Xu56d28b12015-02-04 07:33:24 +11003030 struct nl_seq_iter *iter = seq->private;
3031
3032 if (iter->link >= MAX_LINKS)
3033 return;
3034
3035 netlink_walk_stop(iter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036}
3037
3038
3039static int netlink_seq_show(struct seq_file *seq, void *v)
3040{
Eric Dumazet658cb352012-04-22 21:30:21 +00003041 if (v == SEQ_START_TOKEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 seq_puts(seq,
3043 "sk Eth Pid Groups "
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003044 "Rmem Wmem Dump Locks Drops Inode\n");
Eric Dumazet658cb352012-04-22 21:30:21 +00003045 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046 struct sock *s = v;
3047 struct netlink_sock *nlk = nlk_sk(s);
3048
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003049 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050 s,
3051 s->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00003052 nlk->portid,
Patrick McHardy513c2502005-09-06 15:43:59 -07003053 nlk->groups ? (u32)nlk->groups[0] : 0,
Eric Dumazet31e6d362009-06-17 19:05:41 -07003054 sk_rmem_alloc_get(s),
3055 sk_wmem_alloc_get(s),
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003056 nlk->cb_running,
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07003057 atomic_read(&s->sk_refcnt),
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003058 atomic_read(&s->sk_drops),
3059 sock_i_ino(s)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060 );
3061
3062 }
3063 return 0;
3064}
3065
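/*
 * A /proc/net/netlink line produced above then looks roughly like this
 * (values are illustrative; the sk pointer is printed with %pK and so is
 * zeroed for unprivileged readers):
 *
 *	ffff88003c712000 0   4082   00000111 0        0        0 2        0        11180
 */
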
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003066static const struct seq_operations netlink_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067 .start = netlink_seq_start,
3068 .next = netlink_seq_next,
3069 .stop = netlink_seq_stop,
3070 .show = netlink_seq_show,
3071};
3072
3073
3074static int netlink_seq_open(struct inode *inode, struct file *file)
3075{
Denis V. Luneve372c412007-11-19 22:31:54 -08003076 return seq_open_net(inode, file, &netlink_seq_ops,
3077 sizeof(struct nl_seq_iter));
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003078}
3079
Arjan van de Venda7071d2007-02-12 00:55:36 -08003080static const struct file_operations netlink_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081 .owner = THIS_MODULE,
3082 .open = netlink_seq_open,
3083 .read = seq_read,
3084 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003085 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086};
3087
3088#endif
3089
3090int netlink_register_notifier(struct notifier_block *nb)
3091{
Alan Sterne041c682006-03-27 01:16:30 -08003092 return atomic_notifier_chain_register(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003094EXPORT_SYMBOL(netlink_register_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095
3096int netlink_unregister_notifier(struct notifier_block *nb)
3097{
Alan Sterne041c682006-03-27 01:16:30 -08003098 return atomic_notifier_chain_unregister(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003100EXPORT_SYMBOL(netlink_unregister_notifier);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003101
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003102static const struct proto_ops netlink_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003103 .family = PF_NETLINK,
3104 .owner = THIS_MODULE,
3105 .release = netlink_release,
3106 .bind = netlink_bind,
3107 .connect = netlink_connect,
3108 .socketpair = sock_no_socketpair,
3109 .accept = sock_no_accept,
3110 .getname = netlink_getname,
Patrick McHardy9652e932013-04-17 06:47:02 +00003111 .poll = netlink_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003112 .ioctl = sock_no_ioctl,
3113 .listen = sock_no_listen,
3114 .shutdown = sock_no_shutdown,
Patrick McHardy9a4595b2005-08-15 12:32:15 -07003115 .setsockopt = netlink_setsockopt,
3116 .getsockopt = netlink_getsockopt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117 .sendmsg = netlink_sendmsg,
3118 .recvmsg = netlink_recvmsg,
Patrick McHardyccdfcc32013-04-17 06:47:01 +00003119 .mmap = netlink_mmap,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120 .sendpage = sock_no_sendpage,
3121};
3122
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003123static const struct net_proto_family netlink_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124 .family = PF_NETLINK,
3125 .create = netlink_create,
3126 .owner = THIS_MODULE, /* for consistency 8) */
3127};
3128
Pavel Emelyanov46650792007-10-08 20:38:39 -07003129static int __net_init netlink_net_init(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003130{
3131#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00003132 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003133 return -ENOMEM;
3134#endif
3135 return 0;
3136}
3137
Pavel Emelyanov46650792007-10-08 20:38:39 -07003138static void __net_exit netlink_net_exit(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003139{
3140#ifdef CONFIG_PROC_FS
Gao fengece31ff2013-02-18 01:34:56 +00003141 remove_proc_entry("netlink", net->proc_net);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003142#endif
3143}
3144
David S. Millerb963ea82010-08-30 19:08:01 -07003145static void __init netlink_add_usersock_entry(void)
3146{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003147 struct listeners *listeners;
David S. Millerb963ea82010-08-30 19:08:01 -07003148 int groups = 32;
3149
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003150 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
David S. Millerb963ea82010-08-30 19:08:01 -07003151 if (!listeners)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003152 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
David S. Millerb963ea82010-08-30 19:08:01 -07003153
3154 netlink_table_grab();
3155
3156 nl_table[NETLINK_USERSOCK].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003157 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
David S. Millerb963ea82010-08-30 19:08:01 -07003158 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3159 nl_table[NETLINK_USERSOCK].registered = 1;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00003160 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
David S. Millerb963ea82010-08-30 19:08:01 -07003161
3162 netlink_table_ungrab();
3163}
3164
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003165static struct pernet_operations __net_initdata netlink_net_ops = {
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003166 .init = netlink_net_init,
3167 .exit = netlink_net_exit,
3168};
3169
Patrick McHardy49f7b332015-03-25 13:07:45 +00003170static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
Herbert Xuc428ecd2015-03-20 21:57:01 +11003171{
3172 const struct netlink_sock *nlk = data;
3173 struct netlink_compare_arg arg;
3174
3175 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
Herbert Xu11b58ba2015-03-24 00:50:22 +11003176 return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
Herbert Xuc428ecd2015-03-20 21:57:01 +11003177}
3178
3179static const struct rhashtable_params netlink_rhashtable_params = {
3180 .head_offset = offsetof(struct netlink_sock, node),
3181 .key_len = netlink_compare_arg_len,
Herbert Xuc428ecd2015-03-20 21:57:01 +11003182 .obj_hashfn = netlink_hash,
3183 .obj_cmpfn = netlink_compare,
3184 .max_size = 65536,
Thomas Grafb5e2c152015-03-24 20:42:19 +00003185 .automatic_shrinking = true,
Herbert Xuc428ecd2015-03-20 21:57:01 +11003186};
3187
Linus Torvalds1da177e2005-04-16 15:20:36 -07003188static int __init netlink_proto_init(void)
3189{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003191 int err = proto_register(&netlink_proto, 0);
3192
3193 if (err != 0)
3194 goto out;
3195
YOSHIFUJI Hideaki / 吉藤英明fab25742013-01-09 07:19:48 +00003196 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197
Panagiotis Issaris0da974f2006-07-21 14:51:30 -07003198 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003199 if (!nl_table)
3200 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201
Linus Torvalds1da177e2005-04-16 15:20:36 -07003202 for (i = 0; i < MAX_LINKS; i++) {
Herbert Xuc428ecd2015-03-20 21:57:01 +11003203 if (rhashtable_init(&nl_table[i].hash,
3204 &netlink_rhashtable_params) < 0) {
Thomas Grafe3416942014-08-02 11:47:45 +02003205			while (--i >= 0)
3206 rhashtable_destroy(&nl_table[i].hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207 kfree(nl_table);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003208 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210 }
3211
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02003212 INIT_LIST_HEAD(&netlink_tap_all);
3213
David S. Millerb963ea82010-08-30 19:08:01 -07003214 netlink_add_usersock_entry();
3215
Linus Torvalds1da177e2005-04-16 15:20:36 -07003216 sock_register(&netlink_family_ops);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003217 register_pernet_subsys(&netlink_net_ops);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003218 /* The netlink device handler may be needed early. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219 rtnetlink_init();
3220out:
3221 return err;
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003222panic:
3223 panic("netlink_init: Cannot allocate nl_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224}
3225
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226core_initcall(netlink_proto_init);