/*
 * NETLINK	Kernel-user communication protocol.
 *
 * Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *	added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *	use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *	- inc module use count of module that owns
 *	  the kernel socket in case userspace opens
 *	  socket of same protocol
 *	- remove all module support, since netlink is
 *	  mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_S_CONGESTED		0x0

/* flags */
#define NETLINK_F_KERNEL_SOCKET		0x1
#define NETLINK_F_RECV_PKTINFO		0x2
#define NETLINK_F_BROADCAST_SEND_ERROR	0x4
#define NETLINK_F_RECV_NO_ENOBUFS	0x8
#define NETLINK_F_LISTEN_ALL_NSID	0x10
#define NETLINK_F_CAP_ACK		0x20

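/* Note: NETLINK_S_CONGESTED is a bit *number* used with test_bit()/set_bit()
 * on nlk->state, while the NETLINK_F_* values are bit *masks* OR'ed into
 * nlk->flags (see netlink_is_kernel() and netlink_overrun() below).
 */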
static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}

struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with a per-bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU-protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list, and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

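/* Map a 1-based multicast group number to its bit in a group bitmask,
 * e.g. group 1 -> 0x1 and group 5 -> 0x10; group 0 means "no group" and
 * yields an empty mask.
 */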
static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

int netlink_add_tap(struct netlink_tap *nt)
{
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	bool found = false;
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	spin_unlock(&netlink_tap_lock);

	if (found)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	dev_hold(dev);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	dev_put(dev);
	return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
	rcu_read_lock();

	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

	rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);
}

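/* Called when a message cannot be queued to a receiver.  Unless the socket
 * opted out via the NETLINK_NO_ENOBUFS option (NETLINK_F_RECV_NO_ENOBUFS),
 * the first overrun of a congestion period sets NETLINK_S_CONGESTED and
 * reports ENOBUFS to the owner; sk_drops is incremented in every case.
 */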
static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_S_CONGESTED,
				      &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}

static void
__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
		   unsigned int order)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sk_buff_head *queue;
	struct netlink_ring *ring;

	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	spin_lock_bh(&queue->lock);

	ring->frame_max		= req->nm_frame_nr - 1;
	ring->head		= 0;
	ring->frame_size	= req->nm_frame_size;
	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

	swap(ring->pg_vec_len, req->nm_block_nr);
	swap(ring->pg_vec_order, order);
	swap(ring->pg_vec, pg_vec);

	__skb_queue_purge(queue);
	spin_unlock_bh(&queue->lock);

	WARN_ON(atomic_read(&nlk->mapped));

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	void **pg_vec = NULL;
	unsigned int order = 0;

	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	if (atomic_read(&nlk->mapped))
		return -EBUSY;
	if (atomic_read(&ring->pending))
		return -EBUSY;

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!PAGE_ALIGNED(req->nm_block_size))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	mutex_lock(&nlk->pg_vec_lock);
	if (atomic_read(&nlk->mapped) == 0) {
		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
		mutex_unlock(&nlk->pg_vec_lock);
		return 0;
	}

	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);

	return -EBUSY;
}

static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

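/* Frame status handshake with userspace: netlink_get_status() issues a read
 * barrier before reading nm_status, and netlink_set_status() issues a full
 * barrier before publishing a new status, so the frame payload is observed
 * consistently on both sides.  The dcache flushes cover architectures with
 * aliasing data caches; the first page of a frame is flushed by the status
 * helpers themselves.
 */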
static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page(hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	smp_rmb();
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	smp_mb();
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
	unsigned int head = ring->head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, ring->head);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);
}

static bool netlink_has_valid_frame(struct netlink_ring *ring)
{
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_VALID)
			return true;
		pos = pos != 0 ? pos - 1 : ring->frame_max;
	} while (pos != head);

	return false;
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}

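/* poll() for mmaped netlink sockets: such sockets do not call recvmsg(), so
 * pending dumps are advanced from here as long as at least half of the rx
 * ring is unused.  POLLIN is reported when any rx frame is VALID (or data is
 * queued on the regular receive queue); POLLOUT when a tx frame is UNUSED.
 */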
static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	/* We could already have received frames in the normal receive
	 * queue, that will show up as NL_MMAP_STATUS_COPY in the ring,
	 * so if mask contains pollin/etc already, there's no point
	 * walking the ring.
	 */
	if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) {
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (nlk->rx_ring.pg_vec) {
			if (netlink_has_valid_frame(&nlk->rx_ring))
				mask |= POLLIN | POLLRDNORM;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}

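/* Transmit path for mmaped sockets: userspace marks tx ring frames
 * NL_MMAP_STATUS_VALID; each one is copied into a freshly allocated skb,
 * the frame is returned to NL_MMAP_STATUS_UNUSED, and the skb is delivered
 * via netlink_unicast() (and netlink_broadcast() when a group is set).
 * Without MSG_DONTWAIT the loop keeps going while frames are still pending.
 */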
static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct scm_cookie *scm)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}
	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)	false
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)	0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);

		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	if (1) {
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			__netlink_set_ring(sk, &req, false, NULL, 0);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			__netlink_set_ring(sk, &req, true, NULL, 0);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

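/* Bound sockets live in a per-protocol rhashtable keyed by (network
 * namespace, portid).  netlink_compare_arg_len deliberately stops after the
 * portid member so that trailing structure padding is never hashed or
 * compared.
 */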
struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))

static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
{
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
{
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    &nlk_sk(sk)->node,
					    netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();

	return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err;

	lock_sock(sk);

	err = -EBUSY;
	if (nlk_sk(sk)->portid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 &&
	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
		goto err;

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);

	err = __netlink_insert(table, sk);
	if (err) {
		/* In case the hashtable backend returns with -EBUSY
		 * from here, it must not escape to the caller.
		 */
		if (unlikely(err == -EBUSY))
			err = -EOVERFLOW;
		if (err == -EEXIST)
			err = -EADDRINUSE;
		nlk_sk(sk)->portid = 0;
		sock_put(sk);
	}

err:
	release_sock(sk);
	return err;
}

static void netlink_remove(struct sock *sk)
{
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	}
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol,
			    int kern)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol, kern);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static void deferred_put_nlk_sk(struct rcu_head *head)
{
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

	sock_put(&nlk->sk);
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		int i;

		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
			.net = sock_net(sk),
			.protocol = sk->sk_protocol,
			.portid = nlk->portid,
		};
		atomic_notifier_call_chain(&netlink_chain,
					   NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
	return 0;
}

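/* Pick a portid for a socket that sends or connects without an explicit
 * bind(): the process tgid is tried first, and on collision the search
 * continues through negative values seeded at random below -4096, so
 * autobound ids stay out of the positive, pid-shaped range.
 */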
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	s32 rover = -4096;
	bool ok;

retry:
	cond_resched();
	rcu_read_lock();
	ok = !__netlink_lookup(table, portid, net);
	rcu_read_unlock();
	if (!ok) {
		/* Bind collision, search negative portid values. */
		if (rover == -4096)
			/* rover will be in range [S32_MIN, -4097] */
			rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
		else if (rover >= -4096)
			rover = -4097;
		portid = rover--;
		goto retry;
	}

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}

Eric W. Biedermanaa4cf942014-04-23 14:28:03 -07001365/**
1366 * __netlink_ns_capable - General netlink message capability test
1367 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
1368 * @user_ns: The user namespace of the capability to use
1369 * @cap: The capability to use
1370 *
1371 * Test to see if the opener of the socket we received the message
1372 * from had when the netlink socket was created and the sender of the
1373 * message has has the capability @cap in the user namespace @user_ns.
1374 */
1375bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
1376 struct user_namespace *user_ns, int cap)
1377{
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07001378 return ((nsp->flags & NETLINK_SKB_DST) ||
1379 file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
1380 ns_capable(user_ns, cap);
Eric W. Biedermanaa4cf942014-04-23 14:28:03 -07001381}
1382EXPORT_SYMBOL(__netlink_ns_capable);
1383
1384/**
1385 * netlink_ns_capable - General netlink message capability test
1386 * @skb: socket buffer holding a netlink command from userspace
1387 * @user_ns: The user namespace of the capability to use
1388 * @cap: The capability to use
1389 *
1390 * Test to see if the opener of the socket we received the message
1391 * from had when the netlink socket was created and the sender of the
1392 * message has has the capability @cap in the user namespace @user_ns.
1393 */
1394bool netlink_ns_capable(const struct sk_buff *skb,
1395 struct user_namespace *user_ns, int cap)
1396{
1397 return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
1398}
1399EXPORT_SYMBOL(netlink_ns_capable);
1400
1401/**
1402 * netlink_capable - Netlink global message capability test
1403 * @skb: socket buffer holding a netlink command from userspace
1404 * @cap: The capability to use
1405 *
 1406 * Test to see if the opener of the socket we received the message from
 1407 * had the capability @cap when the netlink socket was created and the
 1408 * sender of the message has the capability @cap in all user namespaces.
1409 */
1410bool netlink_capable(const struct sk_buff *skb, int cap)
1411{
1412 return netlink_ns_capable(skb, &init_user_ns, cap);
1413}
1414EXPORT_SYMBOL(netlink_capable);
1415
1416/**
1417 * netlink_net_capable - Netlink network namespace message capability test
1418 * @skb: socket buffer holding a netlink command from userspace
1419 * @cap: The capability to use
1420 *
 1421 * Test to see if the opener of the socket we received the message from
 1422 * had the capability @cap when the netlink socket was created and the
 1423 * sender of the message has the capability @cap over the network
 1424 * namespace of the socket we received the message from.
1425 */
1426bool netlink_net_capable(const struct sk_buff *skb, int cap)
1427{
1428 return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
1429}
1430EXPORT_SYMBOL(netlink_net_capable);
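
/*
 * Editor's note with a hypothetical example (not from the original file):
 * subsystems typically call one of the helpers above at the top of a message
 * handler before acting on a privileged request. foo_doit() and foo_apply()
 * are invented names; only the check pattern is the point.
 *
 *	static int foo_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *			return -EPERM;
 *		return foo_apply(nlh);	// invented helper, shown for shape only
 *	}
 */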
1431
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001432static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001433{
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00001434 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
Eric W. Biedermandf008c92012-11-16 03:03:07 +00001435 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001436}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001438static void
1439netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1440{
1441 struct netlink_sock *nlk = nlk_sk(sk);
1442
1443 if (nlk->subscriptions && !subscriptions)
1444 __sk_del_bind_node(sk);
1445 else if (!nlk->subscriptions && subscriptions)
1446 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1447 nlk->subscriptions = subscriptions;
1448}
1449
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001450static int netlink_realloc_groups(struct sock *sk)
Patrick McHardy513c2502005-09-06 15:43:59 -07001451{
1452 struct netlink_sock *nlk = nlk_sk(sk);
1453 unsigned int groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001454 unsigned long *new_groups;
Patrick McHardy513c2502005-09-06 15:43:59 -07001455 int err = 0;
1456
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001457 netlink_table_grab();
1458
Patrick McHardy513c2502005-09-06 15:43:59 -07001459 groups = nl_table[sk->sk_protocol].groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001460 if (!nl_table[sk->sk_protocol].registered) {
Patrick McHardy513c2502005-09-06 15:43:59 -07001461 err = -ENOENT;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001462 goto out_unlock;
1463 }
Patrick McHardy513c2502005-09-06 15:43:59 -07001464
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001465 if (nlk->ngroups >= groups)
1466 goto out_unlock;
Patrick McHardy513c2502005-09-06 15:43:59 -07001467
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001468 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1469 if (new_groups == NULL) {
1470 err = -ENOMEM;
1471 goto out_unlock;
1472 }
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001473 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001474 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1475
1476 nlk->groups = new_groups;
Patrick McHardy513c2502005-09-06 15:43:59 -07001477 nlk->ngroups = groups;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001478 out_unlock:
1479 netlink_table_ungrab();
1480 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001481}
1482
Johannes Berg02c81ab2014-12-22 18:56:35 +01001483static void netlink_undo_bind(int group, unsigned long groups,
Johannes Berg023e2cf2014-12-23 21:00:06 +01001484 struct sock *sk)
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001485{
Johannes Berg023e2cf2014-12-23 21:00:06 +01001486 struct netlink_sock *nlk = nlk_sk(sk);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001487 int undo;
1488
1489 if (!nlk->netlink_unbind)
1490 return;
1491
1492 for (undo = 0; undo < group; undo++)
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09001493 if (test_bit(undo, &groups))
Pablo Neira8b7c36d2015-01-29 10:51:53 +01001494 nlk->netlink_unbind(sock_net(sk), undo + 1);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001495}
1496
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001497static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1498 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499{
1500 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001501 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 struct netlink_sock *nlk = nlk_sk(sk);
1503 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1504 int err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001505	unsigned long groups = nladdr->nl_groups;
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001506
Hannes Frederic Sowa4e4b5372012-12-15 15:42:19 +00001507 if (addr_len < sizeof(struct sockaddr_nl))
1508 return -EINVAL;
1509
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 if (nladdr->nl_family != AF_NETLINK)
1511 return -EINVAL;
1512
 1513	/* Only superuser is allowed to listen to multicasts */
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001514 if (groups) {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001515 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy513c2502005-09-06 15:43:59 -07001516 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001517 err = netlink_realloc_groups(sk);
1518 if (err)
1519 return err;
Patrick McHardy513c2502005-09-06 15:43:59 -07001520 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001522 if (nlk->portid)
Eric W. Biederman15e47302012-09-07 20:12:54 +00001523 if (nladdr->nl_pid != nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 return -EINVAL;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001525
1526 if (nlk->netlink_bind && groups) {
1527 int group;
1528
1529 for (group = 0; group < nlk->ngroups; group++) {
1530 if (!test_bit(group, &groups))
1531 continue;
Pablo Neira8b7c36d2015-01-29 10:51:53 +01001532 err = nlk->netlink_bind(net, group + 1);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001533 if (!err)
1534 continue;
Johannes Berg023e2cf2014-12-23 21:00:06 +01001535 netlink_undo_bind(group, groups, sk);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001536 return err;
1537 }
1538 }
1539
1540 if (!nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 err = nladdr->nl_pid ?
Herbert Xu8ea65f42015-01-26 14:02:56 +11001542 netlink_insert(sk, nladdr->nl_pid) :
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 netlink_autobind(sock);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001544 if (err) {
Johannes Berg023e2cf2014-12-23 21:00:06 +01001545 netlink_undo_bind(nlk->ngroups, groups, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 return err;
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001547 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 }
1549
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001550 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 return 0;
1552
1553 netlink_table_grab();
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001554 netlink_update_subscriptions(sk, nlk->subscriptions +
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001555 hweight32(groups) -
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001556 hweight32(nlk->groups[0]));
Richard Guy Briggs4f520902014-04-22 21:31:54 -04001557 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08001558 netlink_update_listeners(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 netlink_table_ungrab();
1560
1561 return 0;
1562}
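
/*
 * Editor's usage sketch (added annotation, not in the original source): from
 * userspace, nl_groups in bind() carries a bitmask of the first 32 multicast
 * groups; groups above 32 must be joined with the NETLINK_ADD_MEMBERSHIP
 * socket option handled further below. The group numbers and fd are
 * illustrative assumptions.
 *
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,				// let the kernel pick a portid
 *		.nl_groups = (1u << 0) | (1u << 2),	// groups 1 and 3
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 */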
1563
1564static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1565 int alen, int flags)
1566{
1567 int err = 0;
1568 struct sock *sk = sock->sk;
1569 struct netlink_sock *nlk = nlk_sk(sk);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001570 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571
Changli Gao6503d962010-03-31 22:58:26 +00001572 if (alen < sizeof(addr->sa_family))
1573 return -EINVAL;
1574
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 if (addr->sa_family == AF_UNSPEC) {
1576 sk->sk_state = NETLINK_UNCONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001577 nlk->dst_portid = 0;
Patrick McHardyd629b832005-08-14 19:27:50 -07001578 nlk->dst_group = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 return 0;
1580 }
1581 if (addr->sa_family != AF_NETLINK)
1582 return -EINVAL;
1583
Mike Pecovnik46833a82014-02-24 21:11:16 +01001584 if ((nladdr->nl_groups || nladdr->nl_pid) &&
Eric W. Biederman5187cd02014-04-23 14:25:48 -07001585 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 return -EPERM;
1587
Eric W. Biederman15e47302012-09-07 20:12:54 +00001588 if (!nlk->portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 err = netlink_autobind(sock);
1590
1591 if (err == 0) {
1592 sk->sk_state = NETLINK_CONNECTED;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001593 nlk->dst_portid = nladdr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001594 nlk->dst_group = ffs(nladdr->nl_groups);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 }
1596
1597 return err;
1598}
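
/*
 * Editor's usage sketch (not part of the original source): connect() on a
 * netlink socket only records a default destination; connecting with
 * AF_UNSPEC clears it again, exactly as implemented above.
 *
 *	struct sockaddr_nl kern = { .nl_family = AF_NETLINK };	// portid 0
 *	connect(fd, (struct sockaddr *)&kern, sizeof(kern));	// send()/recv() now usable
 *
 *	struct sockaddr unspec = { .sa_family = AF_UNSPEC };
 *	connect(fd, &unspec, sizeof(unspec));			// back to unconnected
 */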
1599
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001600static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1601 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602{
1603 struct sock *sk = sock->sk;
1604 struct netlink_sock *nlk = nlk_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00001605 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09001606
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 nladdr->nl_family = AF_NETLINK;
1608 nladdr->nl_pad = 0;
1609 *addr_len = sizeof(*nladdr);
1610
1611 if (peer) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001612 nladdr->nl_pid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07001613 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00001615 nladdr->nl_pid = nlk->portid;
Patrick McHardy513c2502005-09-06 15:43:59 -07001616 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 }
1618 return 0;
1619}
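
/*
 * Editor's usage sketch (not part of the original source): after an autobind,
 * userspace can recover the portid the kernel assigned via getsockname(),
 * which lands in the function above with peer == 0.
 *
 *	struct sockaddr_nl self;
 *	socklen_t len = sizeof(self);
 *
 *	getsockname(fd, (struct sockaddr *)&self, &len);
 *	printf("bound to portid %u\n", self.nl_pid);
 */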
1620
Eric W. Biederman15e47302012-09-07 20:12:54 +00001621static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 struct sock *sock;
1624 struct netlink_sock *nlk;
1625
Eric W. Biederman15e47302012-09-07 20:12:54 +00001626 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 if (!sock)
1628 return ERR_PTR(-ECONNREFUSED);
1629
1630 /* Don't bother queuing skb if kernel socket has no input function */
1631 nlk = nlk_sk(sock);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001632 if (sock->sk_state == NETLINK_CONNECTED &&
Eric W. Biederman15e47302012-09-07 20:12:54 +00001633 nlk->dst_portid != nlk_sk(ssk)->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 sock_put(sock);
1635 return ERR_PTR(-ECONNREFUSED);
1636 }
1637 return sock;
1638}
1639
1640struct sock *netlink_getsockbyfilp(struct file *filp)
1641{
Al Viro496ad9a2013-01-23 17:07:38 -05001642 struct inode *inode = file_inode(filp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 struct sock *sock;
1644
1645 if (!S_ISSOCK(inode->i_mode))
1646 return ERR_PTR(-ENOTSOCK);
1647
1648 sock = SOCKET_I(inode)->sk;
1649 if (sock->sk_family != AF_NETLINK)
1650 return ERR_PTR(-EINVAL);
1651
1652 sock_hold(sock);
1653 return sock;
1654}
1655
Pablo Neira3a365152013-06-28 03:04:23 +02001656static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1657 int broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001658{
1659 struct sk_buff *skb;
1660 void *data;
1661
Pablo Neira3a365152013-06-28 03:04:23 +02001662 if (size <= NLMSG_GOODSIZE || broadcast)
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001663 return alloc_skb(size, GFP_KERNEL);
1664
Pablo Neira3a365152013-06-28 03:04:23 +02001665 size = SKB_DATA_ALIGN(size) +
1666 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001667
1668 data = vmalloc(size);
1669 if (data == NULL)
Pablo Neira3a365152013-06-28 03:04:23 +02001670 return NULL;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001671
Eric Dumazet2ea2f622015-04-24 16:05:01 -07001672 skb = __build_skb(data, size);
Pablo Neira3a365152013-06-28 03:04:23 +02001673 if (skb == NULL)
1674 vfree(data);
Eric Dumazet2ea2f622015-04-24 16:05:01 -07001675 else
Pablo Neira3a365152013-06-28 03:04:23 +02001676 skb->destructor = netlink_skb_destructor;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001677
1678 return skb;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001679}
1680
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681/*
1682 * Attach a skb to a netlink socket.
 1683 * The caller must hold a reference to the destination socket. On error, the
 1684 * reference is dropped. The skb is not sent to the destination; all
 1685 * error checks are performed and memory in the queue is reserved.
1686 * Return values:
1687 * < 0: error. skb freed, reference to sock dropped.
1688 * 0: continue
1689 * 1: repeat lookup - reference dropped while waiting for socket memory.
1690 */
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001691int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001692 long *timeo, struct sock *ssk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693{
1694 struct netlink_sock *nlk;
1695
1696 nlk = nlk_sk(sk);
1697
Patrick McHardy5fd96122013-04-17 06:47:03 +00001698 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001699 test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
Patrick McHardy5fd96122013-04-17 06:47:03 +00001700 !netlink_skb_is_mmaped(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 DECLARE_WAITQUEUE(wait, current);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001702 if (!*timeo) {
Denis V. Lunevaed81562007-10-10 21:14:32 -07001703 if (!ssk || netlink_is_kernel(ssk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 netlink_overrun(sk);
1705 sock_put(sk);
1706 kfree_skb(skb);
1707 return -EAGAIN;
1708 }
1709
1710 __set_current_state(TASK_INTERRUPTIBLE);
1711 add_wait_queue(&nlk->wait, &wait);
1712
1713 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001714 test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 !sock_flag(sk, SOCK_DEAD))
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001716 *timeo = schedule_timeout(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717
1718 __set_current_state(TASK_RUNNING);
1719 remove_wait_queue(&nlk->wait, &wait);
1720 sock_put(sk);
1721
1722 if (signal_pending(current)) {
1723 kfree_skb(skb);
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001724 return sock_intr_errno(*timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 }
1726 return 1;
1727 }
Patrick McHardycf0a0182013-04-17 06:47:00 +00001728 netlink_skb_set_owner_r(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 return 0;
1730}
1731
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001732static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 int len = skb->len;
1735
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02001736 netlink_deliver_tap(skb);
1737
Patrick McHardyf9c22882013-04-17 06:47:04 +00001738#ifdef CONFIG_NETLINK_MMAP
1739 if (netlink_skb_is_mmaped(skb))
1740 netlink_queue_mmaped_skb(sk, skb);
1741 else if (netlink_rx_is_mmaped(sk))
1742 netlink_ring_set_copied(sk, skb);
1743 else
1744#endif /* CONFIG_NETLINK_MMAP */
1745 skb_queue_tail(&sk->sk_receive_queue, skb);
David S. Miller676d2362014-04-11 16:15:36 -04001746 sk->sk_data_ready(sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001747 return len;
1748}
1749
1750int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1751{
1752 int len = __netlink_sendskb(sk, skb);
1753
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 sock_put(sk);
1755 return len;
1756}
1757
1758void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1759{
1760 kfree_skb(skb);
1761 sock_put(sk);
1762}
1763
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001764static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765{
1766 int delta;
1767
Patrick McHardy1298ca42013-04-17 06:46:59 +00001768 WARN_ON(skb->sk != NULL);
Patrick McHardy5fd96122013-04-17 06:47:03 +00001769 if (netlink_skb_is_mmaped(skb))
1770 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001772 delta = skb->end - skb->tail;
Pablo Neira Ayusoc05cdb12013-06-03 09:46:28 +00001773 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 return skb;
1775
1776 if (skb_shared(skb)) {
1777 struct sk_buff *nskb = skb_clone(skb, allocation);
1778 if (!nskb)
1779 return skb;
Eric Dumazet8460c002012-04-19 02:24:28 +00001780 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 skb = nskb;
1782 }
1783
1784 if (!pskb_expand_head(skb, 0, -delta, allocation))
1785 skb->truesize -= delta;
1786
1787 return skb;
1788}
1789
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001790static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1791 struct sock *ssk)
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001792{
1793 int ret;
1794 struct netlink_sock *nlk = nlk_sk(sk);
1795
1796 ret = -ECONNREFUSED;
1797 if (nlk->netlink_rcv != NULL) {
1798 ret = skb->len;
Patrick McHardycf0a0182013-04-17 06:47:00 +00001799 netlink_skb_set_owner_r(skb, sk);
Patrick McHardye32123e2013-04-17 06:46:57 +00001800 NETLINK_CB(skb).sk = ssk;
Daniel Borkmann73bfd372013-12-23 14:35:55 +01001801 netlink_deliver_tap_kernel(sk, ssk, skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001802 nlk->netlink_rcv(skb);
Eric Dumazetbfb253c2012-04-22 21:30:29 +00001803 consume_skb(skb);
1804 } else {
1805 kfree_skb(skb);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001806 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001807 sock_put(sk);
1808 return ret;
1809}
1810
1811int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
Eric W. Biederman15e47302012-09-07 20:12:54 +00001812 u32 portid, int nonblock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813{
1814 struct sock *sk;
1815 int err;
1816 long timeo;
1817
1818 skb = netlink_trim(skb, gfp_any());
1819
1820 timeo = sock_sndtimeo(ssk, nonblock);
1821retry:
Eric W. Biederman15e47302012-09-07 20:12:54 +00001822 sk = netlink_getsockbyportid(ssk, portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 if (IS_ERR(sk)) {
1824 kfree_skb(skb);
1825 return PTR_ERR(sk);
1826 }
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001827 if (netlink_is_kernel(sk))
Eric W. Biederman3fbc2902012-05-24 17:21:27 -06001828 return netlink_unicast_kernel(sk, skb, ssk);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07001829
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001830 if (sk_filter(sk, skb)) {
Wang Chen84874602008-07-01 19:55:09 -07001831 err = skb->len;
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07001832 kfree_skb(skb);
1833 sock_put(sk);
1834 return err;
1835 }
1836
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001837 err = netlink_attachskb(sk, skb, &timeo, ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 if (err == 1)
1839 goto retry;
1840 if (err)
1841 return err;
1842
Denis V. Lunev7ee015e2007-10-10 21:14:03 -07001843 return netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08001845EXPORT_SYMBOL(netlink_unicast);
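
/*
 * Editor's note with a hypothetical kernel-side example (not from the
 * original file): a kernel socket's input callback commonly answers the
 * sender by unicasting back to NETLINK_CB(skb).portid. nlmsg_new()/nlmsg_put()
 * are the usual construction helpers; "my_sock" and the NLMSG_DONE reply are
 * assumptions for illustration.
 *
 *	extern struct sock *my_sock;	// created with __netlink_kernel_create()
 *
 *	void my_input(struct sk_buff *skb)
 *	{
 *		struct sk_buff *reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *
 *		if (!reply)
 *			return;
 *		nlmsg_put(reply, 0, nlmsg_hdr(skb)->nlmsg_seq, NLMSG_DONE, 0, 0);
 *		netlink_unicast(my_sock, reply, NETLINK_CB(skb).portid,
 *				MSG_DONTWAIT);
 *	}
 */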
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846
Patrick McHardyf9c22882013-04-17 06:47:04 +00001847struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1848 u32 dst_portid, gfp_t gfp_mask)
1849{
1850#ifdef CONFIG_NETLINK_MMAP
1851 struct sock *sk = NULL;
1852 struct sk_buff *skb;
1853 struct netlink_ring *ring;
1854 struct nl_mmap_hdr *hdr;
1855 unsigned int maxlen;
1856
1857 sk = netlink_getsockbyportid(ssk, dst_portid);
1858 if (IS_ERR(sk))
1859 goto out;
1860
1861 ring = &nlk_sk(sk)->rx_ring;
1862 /* fast-path without atomic ops for common case: non-mmaped receiver */
1863 if (ring->pg_vec == NULL)
1864 goto out_put;
1865
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001866 if (ring->frame_size - NL_MMAP_HDRLEN < size)
1867 goto out_put;
1868
Patrick McHardyf9c22882013-04-17 06:47:04 +00001869 skb = alloc_skb_head(gfp_mask);
1870 if (skb == NULL)
1871 goto err1;
1872
1873 spin_lock_bh(&sk->sk_receive_queue.lock);
1874 /* check again under lock */
1875 if (ring->pg_vec == NULL)
1876 goto out_free;
1877
Thomas Grafaae9f0e2013-11-30 13:21:31 +01001878	/* re-check frame size under lock */
Patrick McHardyf9c22882013-04-17 06:47:04 +00001879 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1880 if (maxlen < size)
1881 goto out_free;
1882
1883 netlink_forward_ring(ring);
1884 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1885 if (hdr == NULL)
1886 goto err2;
1887 netlink_ring_setup_skb(skb, sk, ring, hdr);
1888 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1889 atomic_inc(&ring->pending);
1890 netlink_increment_head(ring);
1891
1892 spin_unlock_bh(&sk->sk_receive_queue.lock);
1893 return skb;
1894
1895err2:
1896 kfree_skb(skb);
1897 spin_unlock_bh(&sk->sk_receive_queue.lock);
Patrick McHardycd1df522013-04-17 06:47:05 +00001898 netlink_overrun(sk);
Patrick McHardyf9c22882013-04-17 06:47:04 +00001899err1:
1900 sock_put(sk);
1901 return NULL;
1902
1903out_free:
1904 kfree_skb(skb);
1905 spin_unlock_bh(&sk->sk_receive_queue.lock);
1906out_put:
1907 sock_put(sk);
1908out:
1909#endif
1910 return alloc_skb(size, gfp_mask);
1911}
1912EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1913
Patrick McHardy4277a082006-03-20 18:52:01 -08001914int netlink_has_listeners(struct sock *sk, unsigned int group)
1915{
1916 int res = 0;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001917 struct listeners *listeners;
Patrick McHardy4277a082006-03-20 18:52:01 -08001918
Denis V. Lunevaed81562007-10-10 21:14:32 -07001919 BUG_ON(!netlink_is_kernel(sk));
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001920
1921 rcu_read_lock();
1922 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1923
Eric Dumazet6d772ac2012-10-18 03:21:55 +00001924 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00001925 res = test_bit(group - 1, listeners->masks);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07001926
1927 rcu_read_unlock();
1928
Patrick McHardy4277a082006-03-20 18:52:01 -08001929 return res;
1930}
1931EXPORT_SYMBOL_GPL(netlink_has_listeners);
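
/*
 * Editor's note with a hypothetical example (not from the original file):
 * netlink_has_listeners() lets a subsystem skip building a notification when
 * nobody has joined the group, the usual pattern in rtnetlink/genetlink
 * style notifiers. MY_GRP and my_sock are illustrative assumptions.
 *
 *	if (!netlink_has_listeners(my_sock, MY_GRP))
 *		return;		// no subscribers, skip allocating the skb entirely
 */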
1932
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00001933static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934{
1935 struct netlink_sock *nlk = nlk_sk(sk);
1936
1937 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02001938 !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
Patrick McHardycf0a0182013-04-17 06:47:00 +00001939 netlink_skb_set_owner_r(skb, sk);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00001940 __netlink_sendskb(sk, skb);
stephen hemminger2c6458002011-12-22 08:52:03 +00001941 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 }
1943 return -1;
1944}
1945
1946struct netlink_broadcast_data {
1947 struct sock *exclude_sk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001948 struct net *net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001949 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 u32 group;
1951 int failure;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08001952 int delivery_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 int congested;
1954 int delivered;
Al Viro7d877f32005-10-21 03:20:43 -04001955 gfp_t allocation;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 struct sk_buff *skb, *skb2;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07001957 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1958 void *tx_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959};
1960
Rami Rosen46c95212014-07-01 21:17:35 +03001961static void do_one_broadcast(struct sock *sk,
1962 struct netlink_broadcast_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963{
1964 struct netlink_sock *nlk = nlk_sk(sk);
1965 int val;
1966
1967 if (p->exclude_sk == sk)
Rami Rosen46c95212014-07-01 21:17:35 +03001968 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
Eric W. Biederman15e47302012-09-07 20:12:54 +00001970 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07001971 !test_bit(p->group - 1, nlk->groups))
Rami Rosen46c95212014-07-01 21:17:35 +03001972 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02001974 if (!net_eq(sock_net(sk), p->net)) {
1975 if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
1976 return;
1977
1978 if (!peernet_has_id(sock_net(sk), p->net))
1979 return;
1980
1981 if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
1982 CAP_NET_BROADCAST))
1983 return;
1984 }
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02001985
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 if (p->failure) {
1987 netlink_overrun(sk);
Rami Rosen46c95212014-07-01 21:17:35 +03001988 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 }
1990
1991 sock_hold(sk);
1992 if (p->skb2 == NULL) {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001993 if (skb_shared(p->skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 p->skb2 = skb_clone(p->skb, p->allocation);
1995 } else {
Tommy S. Christensen68acc022005-05-19 13:06:35 -07001996 p->skb2 = skb_get(p->skb);
1997 /*
1998 * skb ownership may have been set when
1999 * delivered to a previous socket.
2000 */
2001 skb_orphan(p->skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 }
2003 }
2004 if (p->skb2 == NULL) {
2005 netlink_overrun(sk);
2006 /* Clone failed. Notify ALL listeners. */
2007 p->failure = 1;
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002008 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002009 p->delivery_failure = 1;
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02002010 goto out;
2011 }
2012 if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002013 kfree_skb(p->skb2);
2014 p->skb2 = NULL;
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02002015 goto out;
2016 }
2017 if (sk_filter(sk, p->skb2)) {
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002018 kfree_skb(p->skb2);
2019 p->skb2 = NULL;
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02002020 goto out;
2021 }
2022 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
2023 NETLINK_CB(p->skb2).nsid_is_set = true;
2024 val = netlink_broadcast_deliver(sk, p->skb2);
2025 if (val < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 netlink_overrun(sk);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002027 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002028 p->delivery_failure = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 } else {
2030 p->congested |= val;
2031 p->delivered = 1;
2032 p->skb2 = NULL;
2033 }
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02002034out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036}
2037
Eric W. Biederman15e47302012-09-07 20:12:54 +00002038int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002039 u32 group, gfp_t allocation,
2040 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
2041 void *filter_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002043 struct net *net = sock_net(ssk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 struct netlink_broadcast_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 struct sock *sk;
2046
2047 skb = netlink_trim(skb, allocation);
2048
2049 info.exclude_sk = ssk;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002050 info.net = net;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002051 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 info.group = group;
2053 info.failure = 0;
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002054 info.delivery_failure = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 info.congested = 0;
2056 info.delivered = 0;
2057 info.allocation = allocation;
2058 info.skb = skb;
2059 info.skb2 = NULL;
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002060 info.tx_filter = filter;
2061 info.tx_data = filter_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062
 2063	/* While we sleep in clone, do not allow the socket list to change */
2064
2065 netlink_lock_table();
2066
Sasha Levinb67bfe02013-02-27 17:06:00 -08002067 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 do_one_broadcast(sk, &info);
2069
Neil Horman70d4bf62010-07-20 06:45:56 +00002070 consume_skb(skb);
Tommy S. Christensenaa1c6a62005-05-19 13:07:32 -07002071
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 netlink_unlock_table();
2073
Neil Horman70d4bf62010-07-20 06:45:56 +00002074 if (info.delivery_failure) {
2075 kfree_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002076 return -ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002077 }
2078 consume_skb(info.skb2);
Pablo Neira Ayusoff491a72009-02-05 23:56:36 -08002079
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 if (info.delivered) {
2081 if (info.congested && (allocation & __GFP_WAIT))
2082 yield();
2083 return 0;
2084 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 return -ESRCH;
2086}
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002087EXPORT_SYMBOL(netlink_broadcast_filtered);
2088
Eric W. Biederman15e47302012-09-07 20:12:54 +00002089int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002090 u32 group, gfp_t allocation)
2091{
Eric W. Biederman15e47302012-09-07 20:12:54 +00002092 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
Eric W. Biederman910a7e92010-05-04 17:36:46 -07002093 NULL, NULL);
2094}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002095EXPORT_SYMBOL(netlink_broadcast);
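
/*
 * Editor's note with a hypothetical kernel-side example (not from the
 * original file): broadcasting an event to a multicast group. The portid
 * argument names a socket to skip (0 = skip none); the group, message type
 * and helper names below are assumptions for illustration.
 *
 *	struct sk_buff *skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *
 *	if (!skb)
 *		return;
 *	nlmsg_put(skb, 0, 0, MY_EVENT_TYPE, 0, 0);
 *	netlink_broadcast(my_sock, skb, 0, MY_GRP, GFP_KERNEL);
 */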
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096
2097struct netlink_set_err_data {
2098 struct sock *exclude_sk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002099 u32 portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 u32 group;
2101 int code;
2102};
2103
stephen hemmingerb57ef81f2011-12-22 08:52:02 +00002104static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105{
2106 struct netlink_sock *nlk = nlk_sk(sk);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002107 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
2109 if (sk == p->exclude_sk)
2110 goto out;
2111
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08002112 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002113 goto out;
2114
Eric W. Biederman15e47302012-09-07 20:12:54 +00002115 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
Patrick McHardyf7fa9b12005-08-15 12:29:13 -07002116 !test_bit(p->group - 1, nlk->groups))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 goto out;
2118
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002119 if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002120 ret = 1;
2121 goto out;
2122 }
2123
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 sk->sk_err = p->code;
2125 sk->sk_error_report(sk);
2126out:
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002127 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128}
2129
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002130/**
2131 * netlink_set_err - report error to broadcast listeners
2132 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
Eric W. Biederman15e47302012-09-07 20:12:54 +00002133 * @portid: the PORTID of a process that we want to skip (if any)
Johannes Berg840e93f22013-11-19 10:35:40 +01002134 * @group: the broadcast group that will notice the error
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002135 * @code: error code, must be negative (as usual in kernelspace)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002136 *
2137 * This function returns the number of broadcast listeners that have set the
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002138 * NETLINK_NO_ENOBUFS socket option.
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002139 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002140int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141{
2142 struct netlink_set_err_data info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 struct sock *sk;
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002144 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145
2146 info.exclude_sk = ssk;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002147 info.portid = portid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 info.group = group;
Pablo Neira Ayuso4843b932009-03-03 23:37:30 -08002149 /* sk->sk_err wants a positive error value */
2150 info.code = -code;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151
2152 read_lock(&nl_table_lock);
2153
Sasha Levinb67bfe02013-02-27 17:06:00 -08002154 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002155 ret += do_one_set_err(sk, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156
2157 read_unlock(&nl_table_lock);
Pablo Neira Ayuso1a503072010-03-18 14:24:42 +00002158 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159}
Pablo Neira Ayusodd5b6ce2009-03-23 13:21:06 +01002160EXPORT_SYMBOL(netlink_set_err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161
Johannes Berg84659eb2007-07-18 15:47:05 -07002162/* must be called with netlink table grabbed */
2163static void netlink_update_socket_mc(struct netlink_sock *nlk,
2164 unsigned int group,
2165 int is_new)
2166{
2167 int old, new = !!is_new, subscriptions;
2168
2169 old = test_bit(group - 1, nlk->groups);
2170 subscriptions = nlk->subscriptions - old + new;
2171 if (new)
2172 __set_bit(group - 1, nlk->groups);
2173 else
2174 __clear_bit(group - 1, nlk->groups);
2175 netlink_update_subscriptions(&nlk->sk, subscriptions);
2176 netlink_update_listeners(&nlk->sk);
2177}
2178
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002179static int netlink_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002180 char __user *optval, unsigned int optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002181{
2182 struct sock *sk = sock->sk;
2183 struct netlink_sock *nlk = nlk_sk(sk);
Johannes Bergeb496532007-07-18 02:07:51 -07002184 unsigned int val = 0;
2185 int err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002186
2187 if (level != SOL_NETLINK)
2188 return -ENOPROTOOPT;
2189
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002190 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2191 optlen >= sizeof(int) &&
Johannes Bergeb496532007-07-18 02:07:51 -07002192 get_user(val, (unsigned int __user *)optval))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002193 return -EFAULT;
2194
2195 switch (optname) {
2196 case NETLINK_PKTINFO:
2197 if (val)
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002198 nlk->flags |= NETLINK_F_RECV_PKTINFO;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002199 else
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002200 nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002201 err = 0;
2202 break;
2203 case NETLINK_ADD_MEMBERSHIP:
2204 case NETLINK_DROP_MEMBERSHIP: {
Eric W. Biederman5187cd02014-04-23 14:25:48 -07002205 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002206 return -EPERM;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002207 err = netlink_realloc_groups(sk);
2208 if (err)
2209 return err;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002210 if (!val || val - 1 >= nlk->ngroups)
2211 return -EINVAL;
Richard Guy Briggs7774d5e2014-04-22 21:31:55 -04002212 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
Johannes Berg023e2cf2014-12-23 21:00:06 +01002213 err = nlk->netlink_bind(sock_net(sk), val);
Richard Guy Briggs4f520902014-04-22 21:31:54 -04002214 if (err)
2215 return err;
2216 }
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002217 netlink_table_grab();
Johannes Berg84659eb2007-07-18 15:47:05 -07002218 netlink_update_socket_mc(nlk, val,
2219 optname == NETLINK_ADD_MEMBERSHIP);
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002220 netlink_table_ungrab();
Richard Guy Briggs7774d5e2014-04-22 21:31:55 -04002221 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
Johannes Berg023e2cf2014-12-23 21:00:06 +01002222 nlk->netlink_unbind(sock_net(sk), val);
Pablo Neira Ayuso03292742012-06-29 06:15:22 +00002223
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002224 err = 0;
2225 break;
2226 }
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002227 case NETLINK_BROADCAST_ERROR:
2228 if (val)
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002229 nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002230 else
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002231 nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002232 err = 0;
2233 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002234 case NETLINK_NO_ENOBUFS:
2235 if (val) {
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002236 nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
2237 clear_bit(NETLINK_S_CONGESTED, &nlk->state);
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002238 wake_up_interruptible(&nlk->wait);
Eric Dumazet658cb352012-04-22 21:30:21 +00002239 } else {
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002240 nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
Eric Dumazet658cb352012-04-22 21:30:21 +00002241 }
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002242 err = 0;
2243 break;
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002244#ifdef CONFIG_NETLINK_MMAP
2245 case NETLINK_RX_RING:
2246 case NETLINK_TX_RING: {
2247 struct nl_mmap_req req;
2248
2249 /* Rings might consume more memory than queue limits, require
2250 * CAP_NET_ADMIN.
2251 */
2252 if (!capable(CAP_NET_ADMIN))
2253 return -EPERM;
2254 if (optlen < sizeof(req))
2255 return -EINVAL;
2256 if (copy_from_user(&req, optval, sizeof(req)))
2257 return -EFAULT;
Florian Westphal0470eb92015-07-21 16:33:50 +02002258 err = netlink_set_ring(sk, &req,
Patrick McHardyccdfcc32013-04-17 06:47:01 +00002259 optname == NETLINK_TX_RING);
2260 break;
2261 }
2262#endif /* CONFIG_NETLINK_MMAP */
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02002263 case NETLINK_LISTEN_ALL_NSID:
2264 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
2265 return -EPERM;
2266
2267 if (val)
2268 nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
2269 else
2270 nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
2271 err = 0;
2272 break;
Christophe Ricard0a6a3a22015-08-28 07:07:48 +02002273 case NETLINK_CAP_ACK:
2274 if (val)
2275 nlk->flags |= NETLINK_F_CAP_ACK;
2276 else
2277 nlk->flags &= ~NETLINK_F_CAP_ACK;
2278 err = 0;
2279 break;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002280 default:
2281 err = -ENOPROTOOPT;
2282 }
2283 return err;
2284}
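
/*
 * Editor's usage sketch (not part of the original source): joining and
 * leaving multicast groups from userspace goes through the
 * NETLINK_ADD_MEMBERSHIP / NETLINK_DROP_MEMBERSHIP cases above; unlike the
 * bind() bitmask, this works for group numbers beyond 32. The group value
 * is illustrative.
 *
 *	int grp = 33;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
 *	// ... later:
 *	setsockopt(fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP, &grp, sizeof(grp));
 */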
2285
2286static int netlink_getsockopt(struct socket *sock, int level, int optname,
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002287 char __user *optval, int __user *optlen)
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002288{
2289 struct sock *sk = sock->sk;
2290 struct netlink_sock *nlk = nlk_sk(sk);
2291 int len, val, err;
2292
2293 if (level != SOL_NETLINK)
2294 return -ENOPROTOOPT;
2295
2296 if (get_user(len, optlen))
2297 return -EFAULT;
2298 if (len < 0)
2299 return -EINVAL;
2300
2301 switch (optname) {
2302 case NETLINK_PKTINFO:
2303 if (len < sizeof(int))
2304 return -EINVAL;
2305 len = sizeof(int);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002306 val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
Heiko Carstensa27b58f2006-10-30 15:06:12 -08002307 if (put_user(len, optlen) ||
2308 put_user(val, optval))
2309 return -EFAULT;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002310 err = 0;
2311 break;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002312 case NETLINK_BROADCAST_ERROR:
2313 if (len < sizeof(int))
2314 return -EINVAL;
2315 len = sizeof(int);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002316 val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
Pablo Neira Ayusobe0c22a2009-02-18 01:40:43 +00002317 if (put_user(len, optlen) ||
2318 put_user(val, optval))
2319 return -EFAULT;
2320 err = 0;
2321 break;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002322 case NETLINK_NO_ENOBUFS:
2323 if (len < sizeof(int))
2324 return -EINVAL;
2325 len = sizeof(int);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002326 val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07002327 if (put_user(len, optlen) ||
2328 put_user(val, optval))
2329 return -EFAULT;
2330 err = 0;
2331 break;
David Herrmannb42be382015-06-17 17:14:33 +02002332 case NETLINK_LIST_MEMBERSHIPS: {
2333 int pos, idx, shift;
2334
2335 err = 0;
2336 netlink_table_grab();
2337 for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
2338 if (len - pos < sizeof(u32))
2339 break;
2340
2341 idx = pos / sizeof(unsigned long);
2342 shift = (pos % sizeof(unsigned long)) * 8;
2343 if (put_user((u32)(nlk->groups[idx] >> shift),
2344 (u32 __user *)(optval + pos))) {
2345 err = -EFAULT;
2346 break;
2347 }
2348 }
2349 if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
2350 err = -EFAULT;
2351 netlink_table_ungrab();
2352 break;
2353 }
Christophe Ricard0a6a3a22015-08-28 07:07:48 +02002354 case NETLINK_CAP_ACK:
2355 if (len < sizeof(int))
2356 return -EINVAL;
2357 len = sizeof(int);
2358 val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
2359 if (put_user(len, optlen) ||
2360 put_user(val, optval))
2361 return -EFAULT;
2362 err = 0;
2363 break;
Patrick McHardy9a4595b2005-08-15 12:32:15 -07002364 default:
2365 err = -ENOPROTOOPT;
2366 }
2367 return err;
2368}
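
/*
 * Editor's usage sketch (not part of the original source): the
 * NETLINK_LIST_MEMBERSHIPS case above copies the full group bitmap out as an
 * array of u32 words and always reports the required size via optlen, so a
 * caller can size the buffer in two steps.
 *
 *	socklen_t len = 0;
 *
 *	getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &len);
 *	uint32_t *groups = malloc(len);
 *	getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, groups, &len);
 */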
2369
2370static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2371{
2372 struct nl_pktinfo info;
2373
2374 info.group = NETLINK_CB(skb).dst_group;
2375 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2376}
2377
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02002378static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
2379 struct sk_buff *skb)
2380{
2381 if (!NETLINK_CB(skb).nsid_is_set)
2382 return;
2383
2384 put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
2385 &NETLINK_CB(skb).nsid);
2386}
2387
Ying Xue1b784142015-03-02 15:37:48 +08002388static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390 struct sock *sk = sock->sk;
2391 struct netlink_sock *nlk = nlk_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002392 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002393 u32 dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002394 u32 dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 struct sk_buff *skb;
2396 int err;
2397 struct scm_cookie scm;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002398 u32 netlink_skb_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399
2400 if (msg->msg_flags&MSG_OOB)
2401 return -EOPNOTSUPP;
2402
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002403 err = scm_send(sock, msg, &scm, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 if (err < 0)
2405 return err;
2406
2407 if (msg->msg_namelen) {
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002408 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 if (addr->nl_family != AF_NETLINK)
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002410 goto out;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002411 dst_portid = addr->nl_pid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002412 dst_group = ffs(addr->nl_groups);
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002413 err = -EPERM;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002414 if ((dst_group || dst_portid) &&
Eric W. Biederman5187cd02014-04-23 14:25:48 -07002415 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
Eric W. Biedermanb47030c2010-06-13 03:31:06 +00002416 goto out;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002417 netlink_skb_flags |= NETLINK_SKB_DST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 } else {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002419 dst_portid = nlk->dst_portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002420 dst_group = nlk->dst_group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421 }
2422
Eric W. Biederman15e47302012-09-07 20:12:54 +00002423 if (!nlk->portid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 err = netlink_autobind(sock);
2425 if (err)
2426 goto out;
2427 }
2428
Al Viroa8866ff2014-12-12 23:02:36 -05002429 /* It's a really convoluted way for userland to ask for mmaped
2430 * sendmsg(), but that's what we've got...
2431 */
Patrick McHardy5fd96122013-04-17 06:47:03 +00002432 if (netlink_tx_is_mmaped(sk) &&
Ken-ichirou MATSUZAWAc953e23932015-08-20 12:43:53 +09002433 iter_is_iovec(&msg->msg_iter) &&
Al Viroa8866ff2014-12-12 23:02:36 -05002434 msg->msg_iter.nr_segs == 1 &&
Al Viroc0371da2014-11-24 10:42:55 -05002435 msg->msg_iter.iov->iov_base == NULL) {
Patrick McHardy5fd96122013-04-17 06:47:03 +00002436 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002437 &scm);
Patrick McHardy5fd96122013-04-17 06:47:03 +00002438 goto out;
2439 }
2440
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 err = -EMSGSIZE;
2442 if (len > sk->sk_sndbuf - 32)
2443 goto out;
2444 err = -ENOBUFS;
Pablo Neira3a365152013-06-28 03:04:23 +02002445 skb = netlink_alloc_large_skb(len, dst_group);
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002446 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 goto out;
2448
Eric W. Biederman15e47302012-09-07 20:12:54 +00002449 NETLINK_CB(skb).portid = nlk->portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002450 NETLINK_CB(skb).dst_group = dst_group;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002451 NETLINK_CB(skb).creds = scm.creds;
Eric W. Biederman2d7a85f2014-05-30 11:04:00 -07002452 NETLINK_CB(skb).flags = netlink_skb_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 err = -EFAULT;
Al Viro6ce8e9c2014-04-06 21:25:44 -04002455 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 kfree_skb(skb);
2457 goto out;
2458 }
2459
2460 err = security_netlink_send(sk, skb);
2461 if (err) {
2462 kfree_skb(skb);
2463 goto out;
2464 }
2465
Patrick McHardyd629b832005-08-14 19:27:50 -07002466 if (dst_group) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002468 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 }
Eric W. Biederman15e47302012-09-07 20:12:54 +00002470 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471
2472out:
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002473 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 return err;
2475}
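
/*
 * Editor's usage sketch (not part of the original source): a minimal
 * userspace transmit path that ends up in netlink_sendmsg() above. The
 * message type and flags are illustrative; real callers append a
 * protocol-specific payload after the header.
 *
 *	struct nlmsghdr nlh = {
 *		.nlmsg_len   = NLMSG_LENGTH(0),
 *		.nlmsg_type  = NLMSG_NOOP,
 *		.nlmsg_flags = NLM_F_REQUEST,
 *	};
 *	struct sockaddr_nl to = { .nl_family = AF_NETLINK };	// to the kernel
 *	struct iovec iov = { .iov_base = &nlh, .iov_len = nlh.nlmsg_len };
 *	struct msghdr msg = {
 *		.msg_name    = &to,
 *		.msg_namelen = sizeof(to),
 *		.msg_iov     = &iov,
 *		.msg_iovlen  = 1,
 *	};
 *
 *	sendmsg(fd, &msg, 0);
 */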
2476
Ying Xue1b784142015-03-02 15:37:48 +08002477static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 int flags)
2479{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 struct scm_cookie scm;
2481 struct sock *sk = sock->sk;
2482 struct netlink_sock *nlk = nlk_sk(sk);
2483 int noblock = flags&MSG_DONTWAIT;
2484 size_t copied;
Johannes Berg68d6ac62010-08-15 21:20:44 +00002485 struct sk_buff *skb, *data_skb;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002486 int err, ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487
2488 if (flags&MSG_OOB)
2489 return -EOPNOTSUPP;
2490
2491 copied = 0;
2492
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002493 skb = skb_recv_datagram(sk, flags, noblock, &err);
2494 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 goto out;
2496
Johannes Berg68d6ac62010-08-15 21:20:44 +00002497 data_skb = skb;
2498
Johannes Berg1dacc762009-07-01 11:26:02 +00002499#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2500 if (unlikely(skb_shinfo(skb)->frag_list)) {
Johannes Berg1dacc762009-07-01 11:26:02 +00002501 /*
Johannes Berg68d6ac62010-08-15 21:20:44 +00002502 * If this skb has a frag_list, then here that means that we
2503 * will have to use the frag_list skb's data for compat tasks
2504 * and the regular skb's data for normal (non-compat) tasks.
Johannes Berg1dacc762009-07-01 11:26:02 +00002505 *
Johannes Berg68d6ac62010-08-15 21:20:44 +00002506 * If we need to send the compat skb, assign it to the
2507 * 'data_skb' variable so that it will be used below for data
2508 * copying. We keep 'skb' for everything else, including
2509 * freeing both later.
Johannes Berg1dacc762009-07-01 11:26:02 +00002510 */
Johannes Berg68d6ac62010-08-15 21:20:44 +00002511 if (flags & MSG_CMSG_COMPAT)
2512 data_skb = skb_shinfo(skb)->frag_list;
Johannes Berg1dacc762009-07-01 11:26:02 +00002513 }
2514#endif
2515
Eric Dumazet9063e212014-03-07 12:02:33 -08002516 /* Record the max length of recvmsg() calls for future allocations */
2517 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2518 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2519 16384);
2520
Johannes Berg68d6ac62010-08-15 21:20:44 +00002521 copied = data_skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 if (len < copied) {
2523 msg->msg_flags |= MSG_TRUNC;
2524 copied = len;
2525 }
2526
Johannes Berg68d6ac62010-08-15 21:20:44 +00002527 skb_reset_transport_header(data_skb);
David S. Miller51f3d022014-11-05 16:46:40 -05002528 err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529
2530 if (msg->msg_name) {
Steffen Hurrle342dfc32014-01-17 22:53:15 +01002531 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 addr->nl_family = AF_NETLINK;
2533 addr->nl_pad = 0;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002534 addr->nl_pid = NETLINK_CB(skb).portid;
Patrick McHardyd629b832005-08-14 19:27:50 -07002535 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 msg->msg_namelen = sizeof(*addr);
2537 }
2538
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002539 if (nlk->flags & NETLINK_F_RECV_PKTINFO)
Patrick McHardycc9a06c2006-03-12 20:34:27 -08002540 netlink_cmsg_recv_pktinfo(msg, skb);
Nicolas Dichtel59324cf2015-05-07 11:02:53 +02002541 if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
2542 netlink_cmsg_listen_all_nsid(sk, msg, skb);
Patrick McHardycc9a06c2006-03-12 20:34:27 -08002543
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002544 memset(&scm, 0, sizeof(scm));
2545 scm.creds = *NETLINK_CREDS(skb);
Patrick McHardy188ccb52007-05-03 03:27:01 -07002546 if (flags & MSG_TRUNC)
Johannes Berg68d6ac62010-08-15 21:20:44 +00002547 copied = data_skb->len;
David S. Millerdaa37662010-08-15 23:21:50 -07002548
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 skb_free_datagram(sk, skb);
2550
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002551 if (nlk->cb_running &&
2552 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
Andrey Vaginb44d2112011-02-21 02:40:47 +00002553 ret = netlink_dump(sk);
2554 if (ret) {
Ben Pfaffac30ef82014-07-09 10:31:22 -07002555 sk->sk_err = -ret;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002556 sk->sk_error_report(sk);
2557 }
2558 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002560 scm_recv(sock, msg, &scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561out:
2562 netlink_rcv_wake(sk);
2563 return err ? : copied;
2564}
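
/*
 * Editor's usage sketch (not part of the original source): the matching
 * receive side. Userspace normally reads into a page-sized buffer and walks
 * the returned messages with the NLMSG_OK()/NLMSG_NEXT() macros.
 *
 *	char buf[4096];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	struct nlmsghdr *nlh;
 *
 *	for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, n);
 *	     nlh = NLMSG_NEXT(nlh, n)) {
 *		if (nlh->nlmsg_type == NLMSG_DONE)
 *			break;
 *		// process nlh here
 *	}
 */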
2565
David S. Miller676d2362014-04-11 16:15:36 -04002566static void netlink_data_ready(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567{
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002568 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569}
2570
2571/*
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09002572 * We export these functions to other modules. They provide a
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 * complete set of kernel non-blocking support for message
2574 * queueing.
2575 */
2576
2577struct sock *
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002578__netlink_kernel_create(struct net *net, int unit, struct module *module,
2579 struct netlink_kernel_cfg *cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580{
2581 struct socket *sock;
2582 struct sock *sk;
Patrick McHardy77247bb2005-08-14 19:27:13 -07002583 struct netlink_sock *nlk;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002584 struct listeners *listeners = NULL;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002585 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2586 unsigned int groups;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587
Akinobu Mitafab2caf2006-08-29 02:15:24 -07002588 BUG_ON(!nl_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002590 if (unit < 0 || unit >= MAX_LINKS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 return NULL;
2592
2593 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2594 return NULL;
Eric W. Biederman13d30782015-05-08 21:11:33 -05002595
2596 if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002597 goto out_sock_release_nosk;
2598
2599 sk = sock->sk;
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002600
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002601 if (!cfg || cfg->groups < 32)
Patrick McHardy4277a082006-03-20 18:52:01 -08002602 groups = 32;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002603 else
2604 groups = cfg->groups;
Patrick McHardy4277a082006-03-20 18:52:01 -08002605
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002606 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
Patrick McHardy4277a082006-03-20 18:52:01 -08002607 if (!listeners)
2608 goto out_sock_release;
2609
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 sk->sk_data_ready = netlink_data_ready;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00002611 if (cfg && cfg->input)
2612 nlk_sk(sk)->netlink_rcv = cfg->input;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613
Herbert Xu8ea65f42015-01-26 14:02:56 +11002614 if (netlink_insert(sk, 0))
Patrick McHardy77247bb2005-08-14 19:27:13 -07002615 goto out_sock_release;
2616
2617 nlk = nlk_sk(sk);
Nicolas Dichtelcc3a5722015-05-07 11:02:52 +02002618 nlk->flags |= NETLINK_F_KERNEL_SOCKET;
Patrick McHardy77247bb2005-08-14 19:27:13 -07002619
2620 netlink_table_grab();
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002621 if (!nl_table[unit].registered) {
2622 nl_table[unit].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002623 rcu_assign_pointer(nl_table[unit].listeners, listeners);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002624 nl_table[unit].cb_mutex = cb_mutex;
2625 nl_table[unit].module = module;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002626 if (cfg) {
2627 nl_table[unit].bind = cfg->bind;
Hiroaki SHIMODA6251edd2014-11-13 04:24:10 +09002628 nl_table[unit].unbind = cfg->unbind;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002629 nl_table[unit].flags = cfg->flags;
Gao fengda12c902013-06-06 14:49:11 +08002630 if (cfg->compare)
2631 nl_table[unit].compare = cfg->compare;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00002632 }
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002633 nl_table[unit].registered = 1;
Jesper Juhlf937f1f462007-10-15 01:39:12 -07002634 } else {
2635 kfree(listeners);
Denis V. Lunev869e58f2008-01-18 23:53:31 -08002636 nl_table[unit].registered++;
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002637 }
Patrick McHardy77247bb2005-08-14 19:27:13 -07002638 netlink_table_ungrab();
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002639 return sk;
2640
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002641out_sock_release:
Patrick McHardy4277a082006-03-20 18:52:01 -08002642 kfree(listeners);
Denis V. Lunev9dfbec12008-02-29 11:17:56 -08002643 netlink_kernel_release(sk);
Pavel Emelyanov23fe1862008-01-30 19:31:06 -08002644 return NULL;
2645
2646out_sock_release_nosk:
Harald Welte4fdb3bb2005-08-09 19:40:55 -07002647 sock_release(sock);
Patrick McHardy77247bb2005-08-14 19:27:13 -07002648 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649}
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00002650EXPORT_SYMBOL(__netlink_kernel_create);
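
/*
 * Illustrative sketch, not part of this file: a minimal kernel-side user
 * of the API above.  The protocol number, handler and variable names
 * (EXAMPLE_NETLINK_PROTO, example_input, example_nlsk) are placeholders
 * chosen for the example; real callers normally go through the
 * netlink_kernel_create() wrapper, which supplies THIS_MODULE.
 */
#if 0
static struct sock *example_nlsk;

static void example_input(struct sk_buff *skb)
{
	/* typically dispatches each message via netlink_rcv_skb(),
	 * see the sketch after netlink_rcv_skb() below
	 */
}

static int __init example_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,			/* minimum group count */
		.input	= example_input,	/* runs for each skb queued on the kernel socket */
	};

	example_nlsk = netlink_kernel_create(&init_net, EXAMPLE_NETLINK_PROTO, &cfg);
	return example_nlsk ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	netlink_kernel_release(example_nlsk);
}
#endif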
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002651
2652void
2653netlink_kernel_release(struct sock *sk)
2654{
Eric W. Biederman13d30782015-05-08 21:11:33 -05002655 if (sk == NULL || sk->sk_socket == NULL)
2656 return;
2657
2658 sock_release(sk->sk_socket);
Denis V. Lunevb7c6ba62008-01-28 14:41:19 -08002659}
2660EXPORT_SYMBOL(netlink_kernel_release);
2661
Johannes Bergd136f1b2009-09-12 03:03:15 +00002662int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002663{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002664 struct listeners *new, *old;
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002665 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002666
2667 if (groups < 32)
2668 groups = 32;
2669
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002670 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002671 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2672 if (!new)
Johannes Bergd136f1b2009-09-12 03:03:15 +00002673 return -ENOMEM;
Eric Dumazet6d772ac2012-10-18 03:21:55 +00002674 old = nl_deref_protected(tbl->listeners);
Eric Dumazet5c398dc2010-10-24 04:27:10 +00002675 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2676 rcu_assign_pointer(tbl->listeners, new);
2677
Lai Jiangshan37b6b932011-03-15 18:01:42 +08002678 kfree_rcu(old, rcu);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002679 }
2680 tbl->groups = groups;
2681
Johannes Bergd136f1b2009-09-12 03:03:15 +00002682 return 0;
2683}
2684
2685/**
2686 * netlink_change_ngroups - change number of multicast groups
2687 *
2688 * This changes the number of multicast groups that are available
2689 * on a certain netlink family. Note that it is not possible to
2690 * change the number of groups to below 32. Also note that it does
2691 * not implicitly call netlink_clear_multicast_users() when the
2692 * number of groups is reduced.
2693 *
2694 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2695 * @groups: The new number of groups.
2696 */
2697int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2698{
2699 int err;
2700
2701 netlink_table_grab();
2702 err = __netlink_change_ngroups(sk, groups);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002703 netlink_table_ungrab();
Johannes Bergd136f1b2009-09-12 03:03:15 +00002704
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002705 return err;
2706}
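
/*
 * Illustrative sketch, not part of this file: a family that registers
 * extra multicast groups at run time grows its group space like this
 * (example_sock is a placeholder for that family's kernel socket).
 * Requests below 32 are rounded up, and shrinking does not clear
 * existing subscribers, as noted in the kernel-doc above.
 */
#if 0
	err = netlink_change_ngroups(example_sock, 64);
	if (err)
		return err;
#endif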
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002707
Johannes Bergb8273572009-09-24 15:44:05 -07002708void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2709{
2710 struct sock *sk;
Johannes Bergb8273572009-09-24 15:44:05 -07002711 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2712
Sasha Levinb67bfe02013-02-27 17:06:00 -08002713 sk_for_each_bound(sk, &tbl->mc_list)
Johannes Bergb8273572009-09-24 15:44:05 -07002714 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2715}
2716
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002717struct nlmsghdr *
Eric W. Biederman15e47302012-09-07 20:12:54 +00002718__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002719{
2720 struct nlmsghdr *nlh;
Hong zhi guo573ce262013-03-27 06:47:04 +00002721 int size = nlmsg_msg_size(len);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002722
Wang Yufen23b45672014-02-17 16:53:32 +08002723 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002724 nlh->nlmsg_type = type;
2725 nlh->nlmsg_len = size;
2726 nlh->nlmsg_flags = flags;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002727 nlh->nlmsg_pid = portid;
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002728 nlh->nlmsg_seq = seq;
2729 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
Hong zhi guo573ce262013-03-27 06:47:04 +00002730 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002731 return nlh;
2732}
2733EXPORT_SYMBOL(__nlmsg_put);
2734
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735/*
2736 * It looks a bit ugly.
2737 * It would be better to create a kernel thread.
2738 */
2739
2740static int netlink_dump(struct sock *sk)
2741{
2742 struct netlink_sock *nlk = nlk_sk(sk);
2743 struct netlink_callback *cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002744 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 struct nlmsghdr *nlh;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002746 int len, err = -ENOBUFS;
Greg Rosec7ac8672011-06-10 01:27:09 +00002747 int alloc_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002749 mutex_lock(nlk->cb_mutex);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002750 if (!nlk->cb_running) {
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002751 err = -EINVAL;
2752 goto errout_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 }
2754
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002755 cb = &nlk->cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002756 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2757
Patrick McHardyf9c22882013-04-17 06:47:04 +00002758 if (!netlink_rx_is_mmaped(sk) &&
2759 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2760 goto errout_skb;
Eric Dumazet9063e212014-03-07 12:02:33 -08002761
2762 /* NLMSG_GOODSIZE is small to avoid high order allocations being
2763 * required, but it makes sense to _attempt_ a 16K bytes allocation
2764 * to reduce number of system calls on dump operations, if user
2765 * ever provided a big enough buffer.
2766 */
2767 if (alloc_size < nlk->max_recvmsg_len) {
2768 skb = netlink_alloc_skb(sk,
2769 nlk->max_recvmsg_len,
2770 nlk->portid,
2771 GFP_KERNEL |
2772 __GFP_NOWARN |
2773 __GFP_NORETRY);
2774 /* available room should be exact amount to avoid MSG_TRUNC */
2775 if (skb)
2776 skb_reserve(skb, skb_tailroom(skb) -
2777 nlk->max_recvmsg_len);
2778 }
2779 if (!skb)
2780 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2781 GFP_KERNEL);
Greg Rosec7ac8672011-06-10 01:27:09 +00002782 if (!skb)
Dan Carpenterc63d6ea2011-06-15 03:11:42 +00002783 goto errout_skb;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002784 netlink_skb_set_owner_r(skb, sk);
Greg Rosec7ac8672011-06-10 01:27:09 +00002785
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786 len = cb->dump(skb, cb);
2787
2788 if (len > 0) {
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002789 mutex_unlock(nlk->cb_mutex);
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002790
2791 if (sk_filter(sk, skb))
2792 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002793 else
2794 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 return 0;
2796 }
2797
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002798 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2799 if (!nlh)
2800 goto errout_skb;
2801
Johannes Berg670dc282011-06-20 13:40:46 +02002802 nl_dump_check_consistent(cb, nlh);
2803
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002804 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2805
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002806 if (sk_filter(sk, skb))
2807 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002808 else
2809 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810
Thomas Grafa8f74b22005-11-10 02:25:52 +01002811 if (cb->done)
2812 cb->done(cb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002814 nlk->cb_running = false;
2815 mutex_unlock(nlk->cb_mutex);
Gao feng6dc878a2012-10-04 20:15:48 +00002816 module_put(cb->module);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002817 consume_skb(cb->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818 return 0;
Thomas Graf17977542005-06-18 22:53:48 -07002819
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002820errout_skb:
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002821 mutex_unlock(nlk->cb_mutex);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002822 kfree_skb(skb);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002823 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824}
2825
Gao feng6dc878a2012-10-04 20:15:48 +00002826int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2827 const struct nlmsghdr *nlh,
2828 struct netlink_dump_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829{
2830 struct netlink_callback *cb;
2831 struct sock *sk;
2832 struct netlink_sock *nlk;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002833 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834
Patrick McHardyf9c22882013-04-17 06:47:04 +00002835 /* Memory mapped dump requests need to be copied to avoid looping
2836	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2837 * a reference to the skb.
2838 */
2839 if (netlink_skb_is_mmaped(skb)) {
2840 skb = skb_copy(skb, GFP_KERNEL);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002841 if (skb == NULL)
Patrick McHardyf9c22882013-04-17 06:47:04 +00002842 return -ENOBUFS;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002843 } else
2844 atomic_inc(&skb->users);
2845
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002846 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2847 if (sk == NULL) {
2848 ret = -ECONNREFUSED;
2849 goto error_free;
2850 }
2851
2852 nlk = nlk_sk(sk);
2853 mutex_lock(nlk->cb_mutex);
2854 /* A dump is in progress... */
2855 if (nlk->cb_running) {
2856 ret = -EBUSY;
2857 goto error_unlock;
2858 }
2859 /* add reference of module which cb->dump belongs to */
2860 if (!try_module_get(control->module)) {
2861 ret = -EPROTONOSUPPORT;
2862 goto error_unlock;
2863 }
2864
2865 cb = &nlk->cb;
2866 memset(cb, 0, sizeof(*cb));
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002867 cb->dump = control->dump;
2868 cb->done = control->done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869 cb->nlh = nlh;
Pablo Neira Ayuso7175c882012-02-24 14:30:16 +00002870 cb->data = control->data;
Gao feng6dc878a2012-10-04 20:15:48 +00002871 cb->module = control->module;
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002872 cb->min_dump_alloc = control->min_dump_alloc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873 cb->skb = skb;
2874
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002875 nlk->cb_running = true;
Gao feng6dc878a2012-10-04 20:15:48 +00002876
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002877 mutex_unlock(nlk->cb_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878
Andrey Vaginb44d2112011-02-21 02:40:47 +00002879 ret = netlink_dump(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 sock_put(sk);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002881
Andrey Vaginb44d2112011-02-21 02:40:47 +00002882 if (ret)
2883 return ret;
2884
Denis V. Lunev5c582982007-10-23 20:29:25 -07002885	/* We successfully started a dump; by returning -EINTR we
2886	 * signal not to send an ACK even if one was requested.
2887 */
2888 return -EINTR;
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002889
2890error_unlock:
2891 sock_put(sk);
2892 mutex_unlock(nlk->cb_mutex);
2893error_free:
2894 kfree_skb(skb);
2895 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896}
Gao feng6dc878a2012-10-04 20:15:48 +00002897EXPORT_SYMBOL(__netlink_dump_start);
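
/*
 * Illustrative sketch, not part of this file: request handlers normally
 * start dumps through the netlink_dump_start() wrapper, propagating its
 * return value so the -EINTR "dump started, do not ack" convention from
 * above is honoured.  Handler and callback names are placeholders.
 */
#if 0
static int example_handle_request(struct sock *sk, struct sk_buff *skb,
				  struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = example_dump,	/* fills one skb per call */
			.done = example_done,	/* optional cleanup */
		};

		return netlink_dump_start(sk, skb, nlh, &c);
	}

	return example_doit(skb, nlh);
}
#endif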
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898
2899void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2900{
2901 struct sk_buff *skb;
2902 struct nlmsghdr *rep;
2903 struct nlmsgerr *errmsg;
Thomas Graf339bf982006-11-10 14:10:15 -08002904 size_t payload = sizeof(*errmsg);
Christophe Ricard0a6a3a22015-08-28 07:07:48 +02002905 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906
Christophe Ricard0a6a3a22015-08-28 07:07:48 +02002907	/* Error messages get the original request appended, unless the user
2908 * requests to cap the error message.
2909 */
2910 if (!(nlk->flags & NETLINK_F_CAP_ACK) && err)
Thomas Graf339bf982006-11-10 14:10:15 -08002911 payload += nlmsg_len(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912
Patrick McHardyf9c22882013-04-17 06:47:04 +00002913 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2914 NETLINK_CB(in_skb).portid, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915 if (!skb) {
2916 struct sock *sk;
2917
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002918 sk = netlink_lookup(sock_net(in_skb->sk),
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002919 in_skb->sk->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002920 NETLINK_CB(in_skb).portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 if (sk) {
2922 sk->sk_err = ENOBUFS;
2923 sk->sk_error_report(sk);
2924 sock_put(sk);
2925 }
2926 return;
2927 }
2928
Eric W. Biederman15e47302012-09-07 20:12:54 +00002929 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
John Fastabend5dba93a2009-09-25 13:11:44 +00002930 NLMSG_ERROR, payload, 0);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002931 errmsg = nlmsg_data(rep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932 errmsg->error = err;
Christophe Ricard0a6a3a22015-08-28 07:07:48 +02002933 memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
Eric W. Biederman15e47302012-09-07 20:12:54 +00002934 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002936EXPORT_SYMBOL(netlink_ack);
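
/*
 * Illustrative sketch, not part of this file (userspace code): the
 * capped-ack behaviour referenced in netlink_ack() is requested per
 * socket with the NETLINK_CAP_ACK option, so NLMSG_ERROR replies carry
 * only the header of the offending request instead of its full payload.
 */
#if 0
	int on = 1;

	if (setsockopt(fd, SOL_NETLINK, NETLINK_CAP_ACK, &on, sizeof(on)) < 0)
		perror("setsockopt(NETLINK_CAP_ACK)");
#endif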
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002938int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002939 struct nlmsghdr *))
Thomas Graf82ace472005-11-10 02:25:53 +01002940{
Thomas Graf82ace472005-11-10 02:25:53 +01002941 struct nlmsghdr *nlh;
2942 int err;
2943
2944 while (skb->len >= nlmsg_total_size(0)) {
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002945 int msglen;
2946
Arnaldo Carvalho de Melob529ccf2007-04-25 19:08:35 -07002947 nlh = nlmsg_hdr(skb);
Thomas Grafd35b6852007-03-22 23:28:46 -07002948 err = 0;
Thomas Graf82ace472005-11-10 02:25:53 +01002949
Martin Murrayad8e4b72006-01-10 13:02:29 -08002950 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
Thomas Graf82ace472005-11-10 02:25:53 +01002951 return 0;
2952
Thomas Grafd35b6852007-03-22 23:28:46 -07002953 /* Only requests are handled by the kernel */
2954 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
Denis V. Lunev5c582982007-10-23 20:29:25 -07002955 goto ack;
Thomas Grafd35b6852007-03-22 23:28:46 -07002956
Thomas Graf45e7ae72007-03-22 23:29:10 -07002957 /* Skip control messages */
2958 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
Denis V. Lunev5c582982007-10-23 20:29:25 -07002959 goto ack;
Thomas Graf45e7ae72007-03-22 23:29:10 -07002960
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002961 err = cb(skb, nlh);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002962 if (err == -EINTR)
2963 goto skip;
2964
2965ack:
Thomas Grafd35b6852007-03-22 23:28:46 -07002966 if (nlh->nlmsg_flags & NLM_F_ACK || err)
Thomas Graf82ace472005-11-10 02:25:53 +01002967 netlink_ack(skb, nlh, err);
Thomas Graf82ace472005-11-10 02:25:53 +01002968
Denis V. Lunev5c582982007-10-23 20:29:25 -07002969skip:
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002970 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002971 if (msglen > skb->len)
2972 msglen = skb->len;
2973 skb_pull(skb, msglen);
Thomas Graf82ace472005-11-10 02:25:53 +01002974 }
2975
2976 return 0;
2977}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002978EXPORT_SYMBOL(netlink_rcv_skb);
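
/*
 * Illustrative sketch, not part of this file: the input callback
 * installed through netlink_kernel_cfg usually just feeds the queued
 * skb to netlink_rcv_skb() with a per-message handler; acks are then
 * generated automatically as above.  Names are placeholders.
 */
#if 0
static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* validate nlh->nlmsg_type, parse attributes, handle the request */
	return 0;
}

static void example_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &example_rcv_msg);
}
#endif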
Thomas Graf82ace472005-11-10 02:25:53 +01002979
2980/**
Thomas Grafd387f6a2006-08-15 00:31:06 -07002981 * nlmsg_notify - send a notification netlink message
2982 * @sk: netlink socket to use
2983 * @skb: notification message
Eric W. Biederman15e47302012-09-07 20:12:54 +00002984 * @portid: destination netlink portid for reports or 0
Thomas Grafd387f6a2006-08-15 00:31:06 -07002985 * @group: destination multicast group or 0
2986 * @report: 1 to report back, 0 to disable
2987 * @flags: allocation flags
2988 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002989int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
Thomas Grafd387f6a2006-08-15 00:31:06 -07002990 unsigned int group, int report, gfp_t flags)
2991{
2992 int err = 0;
2993
2994 if (group) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002995 int exclude_portid = 0;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002996
2997 if (report) {
2998 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002999 exclude_portid = portid;
Thomas Grafd387f6a2006-08-15 00:31:06 -07003000 }
3001
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08003002		/* errors are reported via the destination sk->sk_err, but delivery
3003		 * errors are propagated if the NETLINK_BROADCAST_ERROR flag is set */
Eric W. Biederman15e47302012-09-07 20:12:54 +00003004 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
Thomas Grafd387f6a2006-08-15 00:31:06 -07003005 }
3006
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08003007 if (report) {
3008 int err2;
3009
Eric W. Biederman15e47302012-09-07 20:12:54 +00003010 err2 = nlmsg_unicast(sk, skb, portid);
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08003011 if (!err || err == -ESRCH)
3012 err = err2;
3013 }
Thomas Grafd387f6a2006-08-15 00:31:06 -07003014
3015 return err;
3016}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003017EXPORT_SYMBOL(nlmsg_notify);
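
/*
 * Illustrative sketch, not part of this file: a typical event
 * notification path builds a message and hands it to nlmsg_notify(),
 * echoing it back to the requester when NLM_F_ECHO was set.
 * example_sock, example_fill_event() and EXAMPLE_MCGRP are placeholders.
 */
#if 0
static int example_notify(struct sock *example_sock, struct sk_buff *request_skb,
			  const struct nlmsghdr *nlh)
{
	u32 portid = NETLINK_CB(request_skb).portid;
	int report = nlmsg_report(nlh);	/* non-zero if NLM_F_ECHO was set */
	struct sk_buff *skb;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (example_fill_event(skb, portid, nlh->nlmsg_seq) < 0) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return nlmsg_notify(example_sock, skb, portid, EXAMPLE_MCGRP,
			    report, GFP_KERNEL);
}
#endif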
Thomas Grafd387f6a2006-08-15 00:31:06 -07003018
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019#ifdef CONFIG_PROC_FS
3020struct nl_seq_iter {
Denis V. Luneve372c412007-11-19 22:31:54 -08003021 struct seq_net_private p;
Herbert Xu56d28b12015-02-04 07:33:24 +11003022 struct rhashtable_iter hti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003023 int link;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024};
3025
Herbert Xu56d28b12015-02-04 07:33:24 +11003026static int netlink_walk_start(struct nl_seq_iter *iter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027{
Herbert Xu56d28b12015-02-04 07:33:24 +11003028 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029
Herbert Xu56d28b12015-02-04 07:33:24 +11003030 err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
3031 if (err) {
3032 iter->link = MAX_LINKS;
3033 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034 }
Herbert Xu56d28b12015-02-04 07:33:24 +11003035
3036 err = rhashtable_walk_start(&iter->hti);
3037 return err == -EAGAIN ? 0 : err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038}
3039
Herbert Xu56d28b12015-02-04 07:33:24 +11003040static void netlink_walk_stop(struct nl_seq_iter *iter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041{
Herbert Xu56d28b12015-02-04 07:33:24 +11003042 rhashtable_walk_stop(&iter->hti);
3043 rhashtable_walk_exit(&iter->hti);
3044}
3045
3046static void *__netlink_seq_next(struct seq_file *seq)
3047{
3048 struct nl_seq_iter *iter = seq->private;
3049 struct netlink_sock *nlk;
3050
3051 do {
3052 for (;;) {
3053 int err;
3054
3055 nlk = rhashtable_walk_next(&iter->hti);
3056
3057 if (IS_ERR(nlk)) {
3058 if (PTR_ERR(nlk) == -EAGAIN)
3059 continue;
3060
3061 return nlk;
3062 }
3063
3064 if (nlk)
3065 break;
3066
3067 netlink_walk_stop(iter);
3068 if (++iter->link >= MAX_LINKS)
3069 return NULL;
3070
3071 err = netlink_walk_start(iter);
3072 if (err)
3073 return ERR_PTR(err);
3074 }
3075 } while (sock_net(&nlk->sk) != seq_file_net(seq));
3076
3077 return nlk;
3078}
3079
3080static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
3081{
3082 struct nl_seq_iter *iter = seq->private;
3083 void *obj = SEQ_START_TOKEN;
3084 loff_t pos;
3085 int err;
3086
3087 iter->link = 0;
3088
3089 err = netlink_walk_start(iter);
3090 if (err)
3091 return ERR_PTR(err);
3092
3093 for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
3094 obj = __netlink_seq_next(seq);
3095
3096 return obj;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003097}
3098
3099static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3100{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101 ++*pos;
Herbert Xu56d28b12015-02-04 07:33:24 +11003102 return __netlink_seq_next(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003103}
3104
3105static void netlink_seq_stop(struct seq_file *seq, void *v)
3106{
Herbert Xu56d28b12015-02-04 07:33:24 +11003107 struct nl_seq_iter *iter = seq->private;
3108
3109 if (iter->link >= MAX_LINKS)
3110 return;
3111
3112 netlink_walk_stop(iter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113}
3114
3115
3116static int netlink_seq_show(struct seq_file *seq, void *v)
3117{
Eric Dumazet658cb352012-04-22 21:30:21 +00003118 if (v == SEQ_START_TOKEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119 seq_puts(seq,
3120 "sk Eth Pid Groups "
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003121 "Rmem Wmem Dump Locks Drops Inode\n");
Eric Dumazet658cb352012-04-22 21:30:21 +00003122 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003123 struct sock *s = v;
3124 struct netlink_sock *nlk = nlk_sk(s);
3125
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003126 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127 s,
3128 s->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00003129 nlk->portid,
Patrick McHardy513c2502005-09-06 15:43:59 -07003130 nlk->groups ? (u32)nlk->groups[0] : 0,
Eric Dumazet31e6d362009-06-17 19:05:41 -07003131 sk_rmem_alloc_get(s),
3132 sk_wmem_alloc_get(s),
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003133 nlk->cb_running,
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07003134 atomic_read(&s->sk_refcnt),
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003135 atomic_read(&s->sk_drops),
3136 sock_i_ino(s)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137 );
3138
3139 }
3140 return 0;
3141}
3142
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003143static const struct seq_operations netlink_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144 .start = netlink_seq_start,
3145 .next = netlink_seq_next,
3146 .stop = netlink_seq_stop,
3147 .show = netlink_seq_show,
3148};
3149
3150
3151static int netlink_seq_open(struct inode *inode, struct file *file)
3152{
Denis V. Luneve372c412007-11-19 22:31:54 -08003153 return seq_open_net(inode, file, &netlink_seq_ops,
3154 sizeof(struct nl_seq_iter));
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003155}
3156
Arjan van de Venda7071d2007-02-12 00:55:36 -08003157static const struct file_operations netlink_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158 .owner = THIS_MODULE,
3159 .open = netlink_seq_open,
3160 .read = seq_read,
3161 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003162 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163};
3164
3165#endif
3166
3167int netlink_register_notifier(struct notifier_block *nb)
3168{
Alan Sterne041c682006-03-27 01:16:30 -08003169 return atomic_notifier_chain_register(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003171EXPORT_SYMBOL(netlink_register_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172
3173int netlink_unregister_notifier(struct notifier_block *nb)
3174{
Alan Sterne041c682006-03-27 01:16:30 -08003175 return atomic_notifier_chain_unregister(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003177EXPORT_SYMBOL(netlink_unregister_notifier);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003178
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003179static const struct proto_ops netlink_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180 .family = PF_NETLINK,
3181 .owner = THIS_MODULE,
3182 .release = netlink_release,
3183 .bind = netlink_bind,
3184 .connect = netlink_connect,
3185 .socketpair = sock_no_socketpair,
3186 .accept = sock_no_accept,
3187 .getname = netlink_getname,
Patrick McHardy9652e932013-04-17 06:47:02 +00003188 .poll = netlink_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189 .ioctl = sock_no_ioctl,
3190 .listen = sock_no_listen,
3191 .shutdown = sock_no_shutdown,
Patrick McHardy9a4595b2005-08-15 12:32:15 -07003192 .setsockopt = netlink_setsockopt,
3193 .getsockopt = netlink_getsockopt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003194 .sendmsg = netlink_sendmsg,
3195 .recvmsg = netlink_recvmsg,
Patrick McHardyccdfcc32013-04-17 06:47:01 +00003196 .mmap = netlink_mmap,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197 .sendpage = sock_no_sendpage,
3198};
3199
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003200static const struct net_proto_family netlink_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201 .family = PF_NETLINK,
3202 .create = netlink_create,
3203 .owner = THIS_MODULE, /* for consistency 8) */
3204};
3205
Pavel Emelyanov46650792007-10-08 20:38:39 -07003206static int __net_init netlink_net_init(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003207{
3208#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00003209 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003210 return -ENOMEM;
3211#endif
3212 return 0;
3213}
3214
Pavel Emelyanov46650792007-10-08 20:38:39 -07003215static void __net_exit netlink_net_exit(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003216{
3217#ifdef CONFIG_PROC_FS
Gao fengece31ff2013-02-18 01:34:56 +00003218 remove_proc_entry("netlink", net->proc_net);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003219#endif
3220}
3221
David S. Millerb963ea82010-08-30 19:08:01 -07003222static void __init netlink_add_usersock_entry(void)
3223{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003224 struct listeners *listeners;
David S. Millerb963ea82010-08-30 19:08:01 -07003225 int groups = 32;
3226
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003227 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
David S. Millerb963ea82010-08-30 19:08:01 -07003228 if (!listeners)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003229 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
David S. Millerb963ea82010-08-30 19:08:01 -07003230
3231 netlink_table_grab();
3232
3233 nl_table[NETLINK_USERSOCK].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003234 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
David S. Millerb963ea82010-08-30 19:08:01 -07003235 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3236 nl_table[NETLINK_USERSOCK].registered = 1;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00003237 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
David S. Millerb963ea82010-08-30 19:08:01 -07003238
3239 netlink_table_ungrab();
3240}
3241
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003242static struct pernet_operations __net_initdata netlink_net_ops = {
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003243 .init = netlink_net_init,
3244 .exit = netlink_net_exit,
3245};
3246
Patrick McHardy49f7b332015-03-25 13:07:45 +00003247static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
Herbert Xuc428ecd2015-03-20 21:57:01 +11003248{
3249 const struct netlink_sock *nlk = data;
3250 struct netlink_compare_arg arg;
3251
3252 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
Herbert Xu11b58ba2015-03-24 00:50:22 +11003253 return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
Herbert Xuc428ecd2015-03-20 21:57:01 +11003254}
3255
3256static const struct rhashtable_params netlink_rhashtable_params = {
3257 .head_offset = offsetof(struct netlink_sock, node),
3258 .key_len = netlink_compare_arg_len,
Herbert Xuc428ecd2015-03-20 21:57:01 +11003259 .obj_hashfn = netlink_hash,
3260 .obj_cmpfn = netlink_compare,
Thomas Grafb5e2c152015-03-24 20:42:19 +00003261 .automatic_shrinking = true,
Herbert Xuc428ecd2015-03-20 21:57:01 +11003262};
3263
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264static int __init netlink_proto_init(void)
3265{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267 int err = proto_register(&netlink_proto, 0);
3268
3269 if (err != 0)
3270 goto out;
3271
YOSHIFUJI Hideaki / 吉藤英明fab25742013-01-09 07:19:48 +00003272 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273
Panagiotis Issaris0da974f2006-07-21 14:51:30 -07003274 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003275 if (!nl_table)
3276 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278 for (i = 0; i < MAX_LINKS; i++) {
Herbert Xuc428ecd2015-03-20 21:57:01 +11003279 if (rhashtable_init(&nl_table[i].hash,
3280 &netlink_rhashtable_params) < 0) {
Thomas Grafe3416942014-08-02 11:47:45 +02003281 while (--i > 0)
3282 rhashtable_destroy(&nl_table[i].hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283 kfree(nl_table);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003284 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286 }
3287
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02003288 INIT_LIST_HEAD(&netlink_tap_all);
3289
David S. Millerb963ea82010-08-30 19:08:01 -07003290 netlink_add_usersock_entry();
3291
Linus Torvalds1da177e2005-04-16 15:20:36 -07003292 sock_register(&netlink_family_ops);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003293 register_pernet_subsys(&netlink_net_ops);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003294 /* The netlink device handler may be needed early. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 rtnetlink_init();
3296out:
3297 return err;
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003298panic:
3299 panic("netlink_init: Cannot allocate nl_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300}
3301
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302core_initcall(netlink_proto_init);