/*
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *                              Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
        struct rcu_head         rcu;
        unsigned long           masks[0];
};

/* state bits */
#define NETLINK_S_CONGESTED             0x0

/* flags */
#define NETLINK_F_KERNEL_SOCKET         0x1
#define NETLINK_F_RECV_PKTINFO          0x2
#define NETLINK_F_BROADCAST_SEND_ERROR  0x4
#define NETLINK_F_RECV_NO_ENOBUFS       0x8
#define NETLINK_F_LISTEN_ALL_NSID       0x10

static inline int netlink_is_kernel(struct sock *sk)
{
        return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}

struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with a per-bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list, and after an RCU grace period.
 */
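/* Reader-side sketch (illustrative; netlink_lookup() further down is the
 * real in-tree user of this scheme): take the RCU read lock, look the
 * socket up, and grab a reference before leaving the read-side section:
 *
 *      rcu_read_lock();
 *      sk = __netlink_lookup(table, portid, net);
 *      if (sk)
 *              sock_hold(sk);
 *      rcu_read_unlock();
 */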
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

static inline u32 netlink_group_mask(u32 group)
{
        return group ? 1 << (group - 1) : 0;
}
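
/* Worked example (added for clarity): multicast group numbers are 1-based,
 * so group 1 maps to mask 0x1, group 5 to 0x10, and group 0 (no group) to
 * an empty mask.
 */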

int netlink_add_tap(struct netlink_tap *nt)
{
        if (unlikely(nt->dev->type != ARPHRD_NETLINK))
                return -EINVAL;

        spin_lock(&netlink_tap_lock);
        list_add_rcu(&nt->list, &netlink_tap_all);
        spin_unlock(&netlink_tap_lock);

        __module_get(nt->module);

        return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
        bool found = false;
        struct netlink_tap *tmp;

        spin_lock(&netlink_tap_lock);

        list_for_each_entry(tmp, &netlink_tap_all, list) {
                if (nt == tmp) {
                        list_del_rcu(&nt->list);
                        found = true;
                        goto out;
                }
        }

        pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
        spin_unlock(&netlink_tap_lock);

        if (found)
                module_put(nt->module);

        return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
        int ret;

        ret = __netlink_remove_tap(nt);
        synchronize_net();

        return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);
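
/* Usage sketch (illustrative only; nlmon_dev and my_tap are hypothetical):
 * a module that wants a copy of all netlink traffic registers a tap backed
 * by an ARPHRD_NETLINK net_device, typically one provided by the nlmon
 * driver, and unregisters it again on teardown:
 *
 *      static struct netlink_tap my_tap = {
 *              .dev    = nlmon_dev,    (must be of type ARPHRD_NETLINK)
 *              .module = THIS_MODULE,
 *      };
 *
 *      err = netlink_add_tap(&my_tap);
 *      ...
 *      netlink_remove_tap(&my_tap);    (synchronize_net() makes it safe
 *                                       to free my_tap afterwards)
 */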

static bool netlink_filter_tap(const struct sk_buff *skb)
{
        struct sock *sk = skb->sk;

        /* We take the more conservative approach and
         * whitelist socket protocols that may pass.
         */
        switch (sk->sk_protocol) {
        case NETLINK_ROUTE:
        case NETLINK_USERSOCK:
        case NETLINK_SOCK_DIAG:
        case NETLINK_NFLOG:
        case NETLINK_XFRM:
        case NETLINK_FIB_LOOKUP:
        case NETLINK_NETFILTER:
        case NETLINK_GENERIC:
                return true;
        }

        return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
                                     struct net_device *dev)
{
        struct sk_buff *nskb;
        struct sock *sk = skb->sk;
        int ret = -ENOMEM;

        dev_hold(dev);
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (nskb) {
                nskb->dev = dev;
                nskb->protocol = htons((u16) sk->sk_protocol);
                nskb->pkt_type = netlink_is_kernel(sk) ?
                                 PACKET_KERNEL : PACKET_USER;
                skb_reset_network_header(nskb);
                ret = dev_queue_xmit(nskb);
                if (unlikely(ret > 0))
                        ret = net_xmit_errno(ret);
        }

        dev_put(dev);
        return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
        int ret;
        struct netlink_tap *tmp;

        if (!netlink_filter_tap(skb))
                return;

        list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
                ret = __netlink_deliver_tap_skb(skb, tmp->dev);
                if (unlikely(ret))
                        break;
        }
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
        rcu_read_lock();

        if (unlikely(!list_empty(&netlink_tap_all)))
                __netlink_deliver_tap(skb);

        rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
                                       struct sk_buff *skb)
{
        if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
                netlink_deliver_tap(skb);
}

static void netlink_overrun(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
                if (!test_and_set_bit(NETLINK_S_CONGESTED,
                                      &nlk_sk(sk)->state)) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
                }
        }
        atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (skb_queue_empty(&sk->sk_receive_queue))
                clear_bit(NETLINK_S_CONGESTED, &nlk->state);
        if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
                wake_up_interruptible(&nlk->wait);
}

#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
        return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
        return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
        return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        else
                return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
        unsigned int i;

        for (i = 0; i < len; i++) {
                if (pg_vec[i] != NULL) {
                        if (is_vmalloc_addr(pg_vec[i]))
                                vfree(pg_vec[i]);
                        else
                                free_pages((unsigned long)pg_vec[i], order);
                }
        }
        kfree(pg_vec);
}

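/* Allocation strategy of the helper below (summary added for clarity):
 * first try a physically contiguous high-order allocation without reclaim
 * retries or failure warnings, then fall back to vmalloc(), and as a last
 * resort retry the contiguous allocation with __GFP_NORETRY cleared.
 */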
static void *alloc_one_pg_vec_page(unsigned long order)
{
        void *buffer;
        gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
                          __GFP_NOWARN | __GFP_NORETRY;

        buffer = (void *)__get_free_pages(gfp_flags, order);
        if (buffer != NULL)
                return buffer;

        buffer = vzalloc((1 << order) * PAGE_SIZE);
        if (buffer != NULL)
                return buffer;

        gfp_flags &= ~__GFP_NORETRY;
        return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
                           struct nl_mmap_req *req, unsigned int order)
{
        unsigned int block_nr = req->nm_block_nr;
        unsigned int i;
        void **pg_vec;

        pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
        if (pg_vec == NULL)
                return NULL;

        for (i = 0; i < block_nr; i++) {
                pg_vec[i] = alloc_one_pg_vec_page(order);
                if (pg_vec[i] == NULL)
                        goto err1;
        }

        return pg_vec;
err1:
        free_pg_vec(pg_vec, order, block_nr);
        return NULL;
}


static void
__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
                   unsigned int order)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sk_buff_head *queue;
        struct netlink_ring *ring;

        queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
        ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

        spin_lock_bh(&queue->lock);

        ring->frame_max = req->nm_frame_nr - 1;
        ring->head = 0;
        ring->frame_size = req->nm_frame_size;
        ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;

        swap(ring->pg_vec_len, req->nm_block_nr);
        swap(ring->pg_vec_order, order);
        swap(ring->pg_vec, pg_vec);

        __skb_queue_purge(queue);
        spin_unlock_bh(&queue->lock);

        WARN_ON(atomic_read(&nlk->mapped));

        if (pg_vec)
                free_pg_vec(pg_vec, order, req->nm_block_nr);
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
                            bool tx_ring)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        void **pg_vec = NULL;
        unsigned int order = 0;

        ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

        if (atomic_read(&nlk->mapped))
                return -EBUSY;
        if (atomic_read(&ring->pending))
                return -EBUSY;

        if (req->nm_block_nr) {
                if (ring->pg_vec != NULL)
                        return -EBUSY;

                if ((int)req->nm_block_size <= 0)
                        return -EINVAL;
                if (!PAGE_ALIGNED(req->nm_block_size))
                        return -EINVAL;
                if (req->nm_frame_size < NL_MMAP_HDRLEN)
                        return -EINVAL;
                if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
                        return -EINVAL;

                ring->frames_per_block = req->nm_block_size /
                                         req->nm_frame_size;
                if (ring->frames_per_block == 0)
                        return -EINVAL;
                if (ring->frames_per_block * req->nm_block_nr !=
                    req->nm_frame_nr)
                        return -EINVAL;

                order = get_order(req->nm_block_size);
                pg_vec = alloc_pg_vec(nlk, req, order);
                if (pg_vec == NULL)
                        return -ENOMEM;
        } else {
                if (req->nm_frame_nr)
                        return -EINVAL;
        }

        mutex_lock(&nlk->pg_vec_lock);
        if (atomic_read(&nlk->mapped) == 0) {
                __netlink_set_ring(sk, req, tx_ring, pg_vec, order);
                mutex_unlock(&nlk->pg_vec_lock);
                return 0;
        }

        mutex_unlock(&nlk->pg_vec_lock);

        if (pg_vec)
                free_pg_vec(pg_vec, order, req->nm_block_nr);

        return -EBUSY;
}
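
/* Example request that passes the checks above (sizes assume a 4 KiB
 * PAGE_SIZE and NL_MMAP_HDRLEN <= 2048):
 *      nm_block_size = 8192   (page aligned, > 0)
 *      nm_frame_size = 2048   (>= NL_MMAP_HDRLEN, properly aligned)
 *      frames_per_block = 8192 / 2048 = 4
 *      nm_block_nr = 16, so nm_frame_nr must be 4 * 16 = 64.
 */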

static void netlink_mm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct socket *sock = file->private_data;
        struct sock *sk = sock->sk;

        if (sk)
                atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct socket *sock = file->private_data;
        struct sock *sk = sock->sk;

        if (sk)
                atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
        .open = netlink_mm_open,
        .close = netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
                        struct vm_area_struct *vma)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        unsigned long start, size, expected;
        unsigned int i;
        int err = -EINVAL;

        if (vma->vm_pgoff)
                return -EINVAL;

        mutex_lock(&nlk->pg_vec_lock);

        expected = 0;
        for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
                if (ring->pg_vec == NULL)
                        continue;
                expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
        }

        if (expected == 0)
                goto out;

        size = vma->vm_end - vma->vm_start;
        if (size != expected)
                goto out;

        start = vma->vm_start;
        for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
                if (ring->pg_vec == NULL)
                        continue;

                for (i = 0; i < ring->pg_vec_len; i++) {
                        struct page *page;
                        void *kaddr = ring->pg_vec[i];
                        unsigned int pg_num;

                        for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
                                page = pgvec_to_page(kaddr);
                                err = vm_insert_page(vma, start, page);
                                if (err < 0)
                                        goto out;
                                start += PAGE_SIZE;
                                kaddr += PAGE_SIZE;
                        }
                }
        }

        atomic_inc(&nlk->mapped);
        vma->vm_ops = &netlink_mmap_ops;
        err = 0;
out:
        mutex_unlock(&nlk->pg_vec_lock);
        return err;
}

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        struct page *p_start, *p_end;

        /* First page is flushed through netlink_{get,set}_status */
        p_start = pgvec_to_page(hdr + PAGE_SIZE);
        p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
        while (p_start <= p_end) {
                flush_dcache_page(p_start);
                p_start++;
        }
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
        smp_rmb();
        flush_dcache_page(pgvec_to_page(hdr));
        return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
                               enum nl_mmap_status status)
{
        smp_mb();
        hdr->nm_status = status;
        flush_dcache_page(pgvec_to_page(hdr));
}

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
        unsigned int pg_vec_pos, frame_off;

        pg_vec_pos = pos / ring->frames_per_block;
        frame_off = pos % ring->frames_per_block;

        return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
                     enum nl_mmap_status status)
{
        struct nl_mmap_hdr *hdr;

        hdr = __netlink_lookup_frame(ring, pos);
        if (netlink_get_status(hdr) != status)
                return NULL;

        return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
                      enum nl_mmap_status status)
{
        return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
                       enum nl_mmap_status status)
{
        unsigned int prev;

        prev = ring->head ? ring->head - 1 : ring->frame_max;
        return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
        ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
        unsigned int head = ring->head, pos = head;
        const struct nl_mmap_hdr *hdr;

        do {
                hdr = __netlink_lookup_frame(ring, pos);
                if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
                        break;
                if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
                        break;
                netlink_increment_head(ring);
        } while (ring->head != head);
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
        struct netlink_ring *ring = &nlk->rx_ring;
        struct nl_mmap_hdr *hdr;
        unsigned int n;

        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL)
                return false;

        n = ring->head + ring->frame_max / 2;
        if (n > ring->frame_max)
                n -= ring->frame_max;

        hdr = __netlink_lookup_frame(ring, n);

        return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}
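
/* Worked example (added for clarity): with frame_max = 7 (an 8-frame ring)
 * and head = 5, the probe position is n = 5 + 7/2 = 8, which wraps to
 * 8 - 7 = 1, i.e. roughly half a ring ahead of the current head.
 */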

static unsigned int netlink_poll(struct file *file, struct socket *sock,
                                 poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        unsigned int mask;
        int err;

        if (nlk->rx_ring.pg_vec != NULL) {
                /* Memory mapped sockets don't call recvmsg(), so flow control
                 * for dumps is performed here. A dump is allowed to continue
                 * if at least half the ring is unused.
                 */
                while (nlk->cb_running && netlink_dump_space(nlk)) {
                        err = netlink_dump(sk);
                        if (err < 0) {
                                sk->sk_err = -err;
                                sk->sk_error_report(sk);
                                break;
                        }
                }
                netlink_rcv_wake(sk);
        }

        mask = datagram_poll(file, sock, wait);

        spin_lock_bh(&sk->sk_receive_queue.lock);
        if (nlk->rx_ring.pg_vec) {
                netlink_forward_ring(&nlk->rx_ring);
                if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
                        mask |= POLLIN | POLLRDNORM;
        }
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        spin_lock_bh(&sk->sk_write_queue.lock);
        if (nlk->tx_ring.pg_vec) {
                if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
                        mask |= POLLOUT | POLLWRNORM;
        }
        spin_unlock_bh(&sk->sk_write_queue.lock);

        return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
        return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
                                   struct netlink_ring *ring,
                                   struct nl_mmap_hdr *hdr)
{
        unsigned int size;
        void *data;

        size = ring->frame_size - NL_MMAP_HDRLEN;
        data = (void *)hdr + NL_MMAP_HDRLEN;

        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->len = 0;

        skb->destructor = netlink_skb_destructor;
        NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
        NETLINK_CB(skb).sk = sk;
}
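
/* Note (added for clarity): after netlink_ring_setup_skb() the skb's data
 * area points directly into the mmap'ed frame, so payload appended via
 * skb_put() and friends lands in user-visible ring memory without a copy;
 * the frame's nl_mmap_hdr immediately precedes it and carries the length,
 * pid and status metadata filled in at delivery time.
 */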

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
                                u32 dst_portid, u32 dst_group,
                                struct scm_cookie *scm)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        struct nl_mmap_hdr *hdr;
        struct sk_buff *skb;
        unsigned int maxlen;
        int err = 0, len = 0;

        mutex_lock(&nlk->pg_vec_lock);

        ring = &nlk->tx_ring;
        maxlen = ring->frame_size - NL_MMAP_HDRLEN;

        do {
                unsigned int nm_len;

                hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
                if (hdr == NULL) {
                        if (!(msg->msg_flags & MSG_DONTWAIT) &&
                            atomic_read(&nlk->tx_ring.pending))
                                schedule();
                        continue;
                }

                nm_len = ACCESS_ONCE(hdr->nm_len);
                if (nm_len > maxlen) {
                        err = -EINVAL;
                        goto out;
                }

                netlink_frame_flush_dcache(hdr, nm_len);

                skb = alloc_skb(nm_len, GFP_KERNEL);
                if (skb == NULL) {
                        err = -ENOBUFS;
                        goto out;
                }
                __skb_put(skb, nm_len);
                memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
                netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

                netlink_increment_head(ring);

                NETLINK_CB(skb).portid = nlk->portid;
                NETLINK_CB(skb).dst_group = dst_group;
                NETLINK_CB(skb).creds = scm->creds;

                err = security_netlink_send(sk, skb);
                if (err) {
                        kfree_skb(skb);
                        goto out;
                }

                if (unlikely(dst_group)) {
                        atomic_inc(&skb->users);
                        netlink_broadcast(sk, skb, dst_portid, dst_group,
                                          GFP_KERNEL);
                }
                err = netlink_unicast(sk, skb, dst_portid,
                                      msg->msg_flags & MSG_DONTWAIT);
                if (err < 0)
                        goto out;
                len += err;

        } while (hdr != NULL ||
                 (!(msg->msg_flags & MSG_DONTWAIT) &&
                  atomic_read(&nlk->tx_ring.pending)));

        if (len > 0)
                err = len;
out:
        mutex_unlock(&nlk->pg_vec_lock);
        return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
        struct nl_mmap_hdr *hdr;

        hdr = netlink_mmap_hdr(skb);
        hdr->nm_len = skb->len;
        hdr->nm_group = NETLINK_CB(skb).dst_group;
        hdr->nm_pid = NETLINK_CB(skb).creds.pid;
        hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
        hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
        netlink_frame_flush_dcache(hdr, hdr->nm_len);
        netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

        NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
        kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring = &nlk->rx_ring;
        struct nl_mmap_hdr *hdr;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL) {
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                kfree_skb(skb);
                netlink_overrun(sk);
                return;
        }
        netlink_increment_head(ring);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        hdr->nm_len = skb->len;
        hdr->nm_group = NETLINK_CB(skb).dst_group;
        hdr->nm_pid = NETLINK_CB(skb).creds.pid;
        hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
        hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
        netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)      false
#define netlink_rx_is_mmaped(sk)        false
#define netlink_tx_is_mmaped(sk)        false
#define netlink_mmap                    sock_no_mmap
#define netlink_poll                    datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)      0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
        struct nl_mmap_hdr *hdr;
        struct netlink_ring *ring;
        struct sock *sk;

        /* If a packet from the kernel to userspace was freed because of an
         * error without being delivered to userspace, the kernel must reset
         * the status. In the direction userspace to kernel, the status is
         * always reset here after the packet was processed and freed.
         */
        if (netlink_skb_is_mmaped(skb)) {
                hdr = netlink_mmap_hdr(skb);
                sk = NETLINK_CB(skb).sk;

                if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
                        netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
                        ring = &nlk_sk(sk)->tx_ring;
                } else {
                        if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
                                hdr->nm_len = 0;
                                netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
                        }
                        ring = &nlk_sk(sk)->rx_ring;
                }

                WARN_ON(atomic_read(&ring->pending) == 0);
                atomic_dec(&ring->pending);
                sock_put(sk);

                skb->head = NULL;
        }
#endif
        if (is_vmalloc_addr(skb->head)) {
                if (!skb->cloned ||
                    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
                        vfree(skb->head);

                skb->head = NULL;
        }
        if (skb->sk != NULL)
                sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
        WARN_ON(skb->sk != NULL);
        skb->sk = sk;
        skb->destructor = netlink_skb_destructor;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
        sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->cb_running) {
                if (nlk->cb.done)
                        nlk->cb.done(&nlk->cb);

                module_put(nlk->cb.module);
                kfree_skb(nlk->cb.skb);
        }

        skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
        if (1) {
                struct nl_mmap_req req;

                memset(&req, 0, sizeof(req));
                if (nlk->rx_ring.pg_vec)
                        __netlink_set_ring(sk, &req, false, NULL, 0);
                memset(&req, 0, sizeof(req));
                if (nlk->tx_ring.pg_vec)
                        __netlink_set_ring(sk, &req, true, NULL, 0);
        }
#endif /* CONFIG_NETLINK_MMAP */

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
                return;
        }

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
        __acquires(nl_table_lock)
{
        might_sleep();

        write_lock_irq(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                                break;
                        write_unlock_irq(&nl_table_lock);
                        schedule();
                        write_lock_irq(&nl_table_lock);
                }

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);
        }
}

void netlink_table_ungrab(void)
        __releases(nl_table_lock)
{
        write_unlock_irq(&nl_table_lock);
        wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
        /* read_lock() synchronizes us to netlink_table_grab */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
}

struct netlink_compare_arg
{
        possible_net_t pnet;
        u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
        (offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
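
/* Illustration (added; sizes assume a 64-bit build with CONFIG_NET_NS):
 * pnet is then a pointer, so offsetof(..., portid) == 8 and
 * netlink_compare_arg_len == 12, while sizeof(struct netlink_compare_arg)
 * would round up to 16. Hashing only the first 12 bytes keeps the trailing
 * padding out of the rhashtable key.
 */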

static inline int netlink_compare(struct rhashtable_compare_arg *arg,
                                  const void *ptr)
{
        const struct netlink_compare_arg *x = arg->key;
        const struct netlink_sock *nlk = ptr;

        return nlk->portid != x->portid ||
               !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
                                     struct net *net, u32 portid)
{
        memset(arg, 0, sizeof(*arg));
        write_pnet(&arg->pnet, net);
        arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
                                     struct net *net)
{
        struct netlink_compare_arg arg;

        netlink_compare_arg_init(&arg, net, portid);
        return rhashtable_lookup_fast(&table->hash, &arg,
                                      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
        struct netlink_compare_arg arg;

        netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
        return rhashtable_lookup_insert_key(&table->hash, &arg,
                                            &nlk_sk(sk)->node,
                                            netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
        struct netlink_table *table = &nl_table[protocol];
        struct sock *sk;

        rcu_read_lock();
        sk = __netlink_lookup(table, portid, net);
        if (sk)
                sock_hold(sk);
        rcu_read_unlock();

        return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
        struct netlink_table *tbl = &nl_table[sk->sk_protocol];
        unsigned long mask;
        unsigned int i;
        struct listeners *listeners;

        listeners = nl_deref_protected(tbl->listeners);
        if (!listeners)
                return;

        for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
                mask = 0;
                sk_for_each_bound(sk, &tbl->mc_list) {
                        if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
                                mask |= nlk_sk(sk)->groups[i];
                }
                listeners->masks[i] = mask;
        }
        /* this function is only called with the netlink table "grabbed", which
         * makes sure updates are visible before bind or setsockopt return. */
}
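
/* Worked example (added; assumes 64-bit longs): a socket bound to group 3
 * contributes bit 2 to masks[0], one bound to group 65 contributes bit 0
 * to masks[1], letting netlink_has_listeners() test a whole group with a
 * single bit operation.
 */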

static int netlink_insert(struct sock *sk, u32 portid)
{
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        int err;

        lock_sock(sk);

        err = -EBUSY;
        if (nlk_sk(sk)->portid)
                goto err;

        err = -ENOMEM;
        if (BITS_PER_LONG > 32 &&
            unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
                goto err;

        nlk_sk(sk)->portid = portid;
        sock_hold(sk);

        err = __netlink_insert(table, sk);
        if (err) {
                if (err == -EEXIST)
                        err = -EADDRINUSE;
                nlk_sk(sk)->portid = 0;
                sock_put(sk);
        }

err:
        release_sock(sk);
        return err;
}

static void netlink_remove(struct sock *sk)
{
        struct netlink_table *table;

        table = &nl_table[sk->sk_protocol];
        if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
                                    netlink_rhashtable_params)) {
                WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }

        netlink_table_grab();
        if (nlk_sk(sk)->subscriptions) {
                __sk_del_bind_node(sk);
                netlink_update_listeners(sk);
        }
        if (sk->sk_protocol == NETLINK_GENERIC)
                atomic_inc(&genl_sk_destructing_cnt);
        netlink_table_ungrab();
}

static struct proto netlink_proto = {
        .name     = "NETLINK",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
                            struct mutex *cb_mutex, int protocol,
                            int kern)
{
        struct sock *sk;
        struct netlink_sock *nlk;

        sock->ops = &netlink_ops;

        sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        nlk = nlk_sk(sk);
        if (cb_mutex) {
                nlk->cb_mutex = cb_mutex;
        } else {
                nlk->cb_mutex = &nlk->cb_def_mutex;
                mutex_init(nlk->cb_mutex);
        }
        init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
        mutex_init(&nlk->pg_vec_lock);
#endif

        sk->sk_destruct = netlink_sock_destruct;
        sk->sk_protocol = protocol;
        return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
                          int kern)
{
        struct module *module = NULL;
        struct mutex *cb_mutex;
        struct netlink_sock *nlk;
        int (*bind)(struct net *net, int group);
        void (*unbind)(struct net *net, int group);
        int err = 0;

        sock->state = SS_UNCONNECTED;

        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;

        netlink_lock_table();
#ifdef CONFIG_MODULES
        if (!nl_table[protocol].registered) {
                netlink_unlock_table();
                request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
                netlink_lock_table();
        }
#endif
        if (nl_table[protocol].registered &&
            try_module_get(nl_table[protocol].module))
                module = nl_table[protocol].module;
        else
                err = -EPROTONOSUPPORT;
        cb_mutex = nl_table[protocol].cb_mutex;
        bind = nl_table[protocol].bind;
        unbind = nl_table[protocol].unbind;
        netlink_unlock_table();

        if (err < 0)
                goto out;

        err = __netlink_create(net, sock, cb_mutex, protocol, kern);
        if (err < 0)
                goto out_module;

        local_bh_disable();
        sock_prot_inuse_add(net, &netlink_proto, 1);
        local_bh_enable();

        nlk = nlk_sk(sock->sk);
        nlk->module = module;
        nlk->netlink_bind = bind;
        nlk->netlink_unbind = unbind;
out:
        return err;

out_module:
        module_put(module);
        goto out;
}

static void deferred_put_nlk_sk(struct rcu_head *head)
{
        struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

        sock_put(&nlk->sk);
}

static int netlink_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk;

        if (!sk)
                return 0;

        netlink_remove(sk);
        sock_orphan(sk);
        nlk = nlk_sk(sk);

        /*
         * OK. Socket is unlinked, any packets that arrive now
         * will be purged.
         */

        /* must not acquire netlink_table_lock in any way again before unbind
         * and notifying genetlink is done as otherwise it might deadlock
         */
        if (nlk->netlink_unbind) {
                int i;

                for (i = 0; i < nlk->ngroups; i++)
                        if (test_bit(i, nlk->groups))
                                nlk->netlink_unbind(sock_net(sk), i + 1);
        }
        if (sk->sk_protocol == NETLINK_GENERIC &&
            atomic_dec_return(&genl_sk_destructing_cnt) == 0)
                wake_up(&genl_sk_destructing_waitq);

        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);

        skb_queue_purge(&sk->sk_write_queue);

        if (nlk->portid) {
                struct netlink_notify n = {
                        .net      = sock_net(sk),
                        .protocol = sk->sk_protocol,
                        .portid   = nlk->portid,
                };
                atomic_notifier_call_chain(&netlink_chain,
                                NETLINK_URELEASE, &n);
        }

        module_put(nlk->module);

        if (netlink_is_kernel(sk)) {
                netlink_table_grab();
                BUG_ON(nl_table[sk->sk_protocol].registered == 0);
                if (--nl_table[sk->sk_protocol].registered == 0) {
                        struct listeners *old;

                        old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
                        RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
                        kfree_rcu(old, rcu);
                        nl_table[sk->sk_protocol].module = NULL;
                        nl_table[sk->sk_protocol].bind = NULL;
                        nl_table[sk->sk_protocol].unbind = NULL;
                        nl_table[sk->sk_protocol].flags = 0;
                        nl_table[sk->sk_protocol].registered = 0;
                }
                netlink_table_ungrab();
        }

        kfree(nlk->groups);
        nlk->groups = NULL;

        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
        local_bh_enable();
        call_rcu(&nlk->rcu, deferred_put_nlk_sk);
        return 0;
}

static int netlink_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        s32 portid = task_tgid_vnr(current);
        int err;
        s32 rover = -4096;
        bool ok;

retry:
        cond_resched();
        rcu_read_lock();
        ok = !__netlink_lookup(table, portid, net);
        rcu_read_unlock();
        if (!ok) {
                /* Bind collision, search negative portid values. */
                if (rover == -4096)
                        /* rover will be in range [S32_MIN, -4097] */
                        rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
                else if (rover >= -4096)
                        rover = -4097;
                portid = rover--;
                goto retry;
        }

        err = netlink_insert(sk, portid);
        if (err == -EADDRINUSE)
                goto retry;

        /* If 2 threads race to autobind, that is fine. */
        if (err == -EBUSY)
                err = 0;

        return err;
}
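
/* Portid selection summary (added for clarity): the first candidate is the
 * caller's thread-group id; after a collision a random negative value in
 * [S32_MIN, -4097] is chosen and then decremented on each further retry,
 * keeping kernel-chosen ids out of the positive, user-selectable range.
 */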

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from
 * had the capability @cap in the user namespace @user_ns when the netlink
 * socket was created, and that the sender of the message has it as well.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
                          struct user_namespace *user_ns, int cap)
{
        return ((nsp->flags & NETLINK_SKB_DST) ||
                file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
                ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from
 * had the capability @cap in the user namespace @user_ns when the netlink
 * socket was created, and that the sender of the message has it as well.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
                        struct user_namespace *user_ns, int cap)
{
        return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from
 * had the capability @cap in all user namespaces when the netlink socket
 * was created, and that the sender of the message has it as well.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
        return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from
 * had the capability @cap over the network namespace of the socket when
 * the netlink socket was created, and that the sender of the message has
 * it as well.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
        return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);
1414
static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
	       ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
 out_unlock:
	netlink_table_ungrab();
	return err;
}

/* Unwind per-group bind callbacks for the first @group bits set in @groups. */
static void netlink_undo_bind(int group, unsigned long groups,
			      struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int undo;

	if (!nlk->netlink_unbind)
		return;

	for (undo = 0; undo < group; undo++)
		if (test_bit(undo, &groups))
			nlk->netlink_unbind(sock_net(sk), undo + 1);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;
	unsigned long groups = nladdr->nl_groups;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to listen to multicasts */
	if (groups) {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->portid)
		if (nladdr->nl_pid != nlk->portid)
			return -EINVAL;

	if (nlk->netlink_bind && groups) {
		int group;

		for (group = 0; group < nlk->ngroups; group++) {
			if (!test_bit(group, &groups))
				continue;
			err = nlk->netlink_bind(net, group + 1);
			if (!err)
				continue;
			netlink_undo_bind(group, groups, sk);
			return err;
		}
	}

	if (!nlk->portid) {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err) {
			netlink_undo_bind(nlk->ngroups, groups, sk);
			return err;
		}
	}

	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}

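/*
 * For reference, a minimal userspace counterpart of the bind path above,
 * with a hypothetical group number GRP (1-based). An nl_pid of 0 lets the
 * kernel autobind a portid; the nl_groups bitmask only reaches groups
 * 1-32, larger groups need NETLINK_ADD_MEMBERSHIP (see
 * netlink_setsockopt() below):
 *
 *	struct sockaddr_nl snl = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,
 *		.nl_groups = 1U << (GRP - 1),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 */
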
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state = NETLINK_UNCONNECTED;
		nlk->dst_portid = 0;
		nlk->dst_group = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	if ((nladdr->nl_groups || nladdr->nl_pid) &&
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	if (!nlk->portid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state = NETLINK_CONNECTED;
		nlk->dst_portid = nladdr->nl_pid;
		nlk->dst_group = ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
					       int broadcast)
{
	struct sk_buff *skb;
	void *data;

	if (size <= NLMSG_GOODSIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = vmalloc(size);
	if (data == NULL)
		return NULL;

	skb = __build_skb(data, size);
	if (skb == NULL)
		vfree(data);
	else
		skb->destructor = netlink_skb_destructor;

	return skb;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; all error
 * checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
	    !netlink_skb_is_mmaped(skb)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}

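/*
 * The "return 1" contract above is what drives the retry loop in
 * netlink_unicast() below: on 1 the caller re-looks up the destination
 * socket and attaches again once receive-queue memory has been freed.
 */
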
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	netlink_deliver_tap(skb);

#ifdef CONFIG_NETLINK_MMAP
	if (netlink_skb_is_mmaped(skb))
		netlink_queue_mmaped_skb(sk, skb);
	else if (netlink_rx_is_mmaped(sk))
		netlink_ring_set_copied(sk, skb);
	else
#endif /* CONFIG_NETLINK_MMAP */
		skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);
	if (netlink_skb_is_mmaped(skb))
		return skb;

	delta = skb->end - skb->tail;
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		netlink_deliver_tap_kernel(sk, ssk, skb);
		nlk->netlink_rcv(skb);
		consume_skb(skb);
	} else {
		kfree_skb(skb);
	}
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbyportid(ssk, portid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);

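/*
 * Usage sketch (hypothetical names, not from this file): a kernel-side
 * input handler replying to the sender of a request skb:
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		struct sk_buff *reply = nlmsg_new(payload_len, GFP_KERNEL);
 *
 *		if (!reply)
 *			return;
 *		... build the reply message into 'reply' ...
 *		netlink_unicast(my_nl_sk, reply, NETLINK_CB(skb).portid,
 *				MSG_DONTWAIT);
 *	}
 *
 * netlink_unicast() consumes the skb in all cases, so no kfree_skb()
 * is needed on failure.
 */
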
struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
				  u32 dst_portid, gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
	struct sock *sk = NULL;
	struct sk_buff *skb;
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	unsigned int maxlen;

	sk = netlink_getsockbyportid(ssk, dst_portid);
	if (IS_ERR(sk))
		goto out;

	ring = &nlk_sk(sk)->rx_ring;
	/* fast-path without atomic ops for common case: non-mmaped receiver */
	if (ring->pg_vec == NULL)
		goto out_put;

	if (ring->frame_size - NL_MMAP_HDRLEN < size)
		goto out_put;

	skb = alloc_skb_head(gfp_mask);
	if (skb == NULL)
		goto err1;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* check both conditions again under the lock */
	if (ring->pg_vec == NULL)
		goto out_free;

	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
	if (maxlen < size)
		goto out_free;

	netlink_forward_ring(ring);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		goto err2;
	netlink_ring_setup_skb(skb, sk, ring, hdr);
	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
	atomic_inc(&ring->pending);
	netlink_increment_head(ring);

	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return skb;

err2:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	netlink_overrun(sk);
err1:
	sock_put(sk);
	return NULL;

out_free:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
out_put:
	sock_put(sk);
out:
#endif
	return alloc_skb(size, gfp_mask);
}
EXPORT_SYMBOL_GPL(netlink_alloc_skb);

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);

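/*
 * A common caller pattern (sketch, hypothetical names) is to skip
 * building an expensive notification altogether when nobody subscribed:
 *
 *	if (!netlink_has_listeners(my_nl_sk, MY_GRP))
 *		return;
 *	... allocate and broadcast the notification ...
 */
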
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 portid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};

static void do_one_broadcast(struct sock *sk,
			     struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		return;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		return;

	if (!net_eq(sock_net(sk), p->net)) {
		if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
			return;

		if (!peernet_has_id(sock_net(sk), p->net))
			return;

		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
				     CAP_NET_BROADCAST))
			return;
	}

	if (p->failure) {
		netlink_overrun(sk);
		return;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
		goto out;
	}
	if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
		goto out;
	}
	if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
		goto out;
	}
	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
	NETLINK_CB(p->skb2).nsid_is_set = true;
	val = netlink_broadcast_deliver(sk, p->skb2);
	if (val < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
out:
	sock_put(sk);
}

int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.portid = portid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
					  NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);

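/*
 * Usage sketch (hypothetical group number): notifying every member of
 * multicast group MY_GRP from a kernel socket, skipping no portid:
 *
 *	netlink_broadcast(my_nl_sk, skb, 0, MY_GRP, GFP_KERNEL);
 *
 * The skb is consumed in all cases; -ESRCH merely means that no one
 * was listening.
 */
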
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 portid;
	u32 group;
	int code;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.portid = portid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);

/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
	    optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_F_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
			err = nlk->netlink_bind(sock_net(sk), val);
			if (err)
				return err;
		}
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
			nlk->netlink_unbind(sock_net(sk), val);

		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else {
			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
		}
		err = 0;
		break;
#ifdef CONFIG_NETLINK_MMAP
	case NETLINK_RX_RING:
	case NETLINK_TX_RING: {
		struct nl_mmap_req req;

		/* Rings might consume more memory than queue limits, require
		 * CAP_NET_ADMIN.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		err = netlink_set_ring(sk, &req,
				       optname == NETLINK_TX_RING);
		break;
	}
#endif /* CONFIG_NETLINK_MMAP */
	case NETLINK_LISTEN_ALL_NSID:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
			return -EPERM;

		if (val)
			nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
		else
			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

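/*
 * Userspace counterpart of NETLINK_ADD_MEMBERSHIP above (sketch with a
 * hypothetical group number). Unlike bind()'s nl_groups bitmask this
 * takes the group number itself, so it also reaches groups above 32:
 *
 *	int grp = MY_GRP;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 */
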
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_LIST_MEMBERSHIPS: {
		int pos, idx, shift;

		err = 0;
		netlink_table_grab();
		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
			if (len - pos < sizeof(u32))
				break;

			idx = pos / sizeof(unsigned long);
			shift = (pos % sizeof(unsigned long)) * 8;
			if (put_user((u32)(nlk->groups[idx] >> shift),
				     (u32 __user *)(optval + pos))) {
				err = -EFAULT;
				break;
			}
		}
		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
			err = -EFAULT;
		netlink_table_ungrab();
		break;
	}
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

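/*
 * NETLINK_LIST_MEMBERSHIPS sketch (userspace, hypothetical buffer size):
 * the kernel copies out the membership bitmap and reports the size it
 * needs through optlen, so a too-small buffer can be detected and the
 * call retried:
 *
 *	__u32 grps[4] = { 0 };
 *	socklen_t sz = sizeof(grps);
 *
 *	getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, grps, &sz);
 */
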
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}

static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
					 struct sk_buff *skb)
{
	if (!NETLINK_CB(skb).nsid_is_set)
		return;

	put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
		 &NETLINK_CB(skb).nsid);
}

static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
	u32 dst_portid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;
	u32 netlink_skb_flags = 0;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	err = scm_send(sock, msg, &scm, true);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		err = -EINVAL;
		if (addr->nl_family != AF_NETLINK)
			goto out;
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		err = -EPERM;
		if ((dst_group || dst_portid) &&
		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
			goto out;
		netlink_skb_flags |= NETLINK_SKB_DST;
	} else {
		dst_portid = nlk->dst_portid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->portid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	/* It's a really convoluted way for userland to ask for mmaped
	 * sendmsg(), but that's what we've got...
	 */
	if (netlink_tx_is_mmaped(sk) &&
	    msg->msg_iter.type == ITER_IOVEC &&
	    msg->msg_iter.nr_segs == 1 &&
	    msg->msg_iter.iov->iov_base == NULL) {
		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
					   &scm);
		goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = netlink_alloc_large_skb(len, dst_group);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).portid = nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds = scm.creds;
	NETLINK_CB(skb).flags = netlink_skb_flags;

	err = -EFAULT;
	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);

out:
	scm_destroy(&scm);
	return err;
}

static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			   int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *data_skb;
	int err, ret;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, it means that we will have to
		 * use the frag_list skb's data for compat tasks and the
		 * regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	/* Record the max length of recvmsg() calls for future allocations */
	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
				     16384);

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid = NETLINK_CB(skb).portid;
		addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);
	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
		netlink_cmsg_listen_all_nsid(sk, msg, skb);

	memset(&scm, 0, sizeof(scm));
	scm.creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb_running &&
	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = -ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, &scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

static void netlink_data_ready(struct sock *sk)
{
	BUG();
}

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;

	if (!cfg || cfg->groups < 32)
		groups = 32;
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_F_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		if (cfg) {
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].unbind = cfg->unbind;
			nl_table[unit].flags = cfg->flags;
			if (cfg->compare)
				nl_table[unit].compare = cfg->compare;
		}
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);

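/*
 * Creation sketch (assuming the netlink_kernel_create() wrapper from
 * <linux/netlink.h>, which supplies THIS_MODULE; names are hypothetical):
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups = 32,
 *		.input  = my_input,
 *	};
 *
 *	my_nl_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
 *	if (!my_nl_sk)
 *		return -ENOMEM;
 */
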
void
netlink_kernel_release(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_release(sk->sk_socket);
}
EXPORT_SYMBOL(netlink_kernel_release);

int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}

2651/**
2652 * netlink_change_ngroups - change number of multicast groups
2653 *
2654 * This changes the number of multicast groups that are available
2655 * on a certain netlink family. Note that it is not possible to
2656 * change the number of groups to below 32. Also note that it does
2657 * not implicitly call netlink_clear_multicast_users() when the
2658 * number of groups is reduced.
2659 *
2660 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2661 * @groups: The new number of groups.
2662 */
2663int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2664{
2665 int err;
2666
2667 netlink_table_grab();
2668 err = __netlink_change_ngroups(sk, groups);
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002669 netlink_table_ungrab();
Johannes Bergd136f1b2009-09-12 03:03:15 +00002670
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002671 return err;
2672}
Johannes Bergb4ff4f02007-07-18 15:46:06 -07002673
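/*
 * Illustrative sketch only (not part of this file): generic netlink, for
 * instance, grows the group space of its kernel socket roughly like this when
 * new multicast groups are registered.  genl_kernel_sock and n_groups are
 * hypothetical names.
 */
#if 0
	err = netlink_change_ngroups(genl_kernel_sock, n_groups);
	if (err)
		return err;	/* listener bitmap could not be enlarged */
#endif
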
Johannes Bergb8273572009-09-24 15:44:05 -07002674void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2675{
2676 struct sock *sk;
Johannes Bergb8273572009-09-24 15:44:05 -07002677 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2678
Sasha Levinb67bfe02013-02-27 17:06:00 -08002679 sk_for_each_bound(sk, &tbl->mc_list)
Johannes Bergb8273572009-09-24 15:44:05 -07002680 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2681}
2682
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002683struct nlmsghdr *
Eric W. Biederman15e47302012-09-07 20:12:54 +00002684__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002685{
2686 struct nlmsghdr *nlh;
Hong zhi guo573ce262013-03-27 06:47:04 +00002687 int size = nlmsg_msg_size(len);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002688
Wang Yufen23b45672014-02-17 16:53:32 +08002689 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002690 nlh->nlmsg_type = type;
2691 nlh->nlmsg_len = size;
2692 nlh->nlmsg_flags = flags;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002693 nlh->nlmsg_pid = portid;
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002694 nlh->nlmsg_seq = seq;
2695 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
Hong zhi guo573ce262013-03-27 06:47:04 +00002696 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
Denys Vlasenkoa46621a2012-01-30 15:22:06 -05002697 return nlh;
2698}
2699EXPORT_SYMBOL(__nlmsg_put);
2700
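/*
 * Illustrative sketch only (not part of this file): message producers usually
 * call the nlmsg_put() helper from <net/netlink.h>, which checks the skb's
 * tailroom and then falls through to __nlmsg_put().  MY_MSG_TYPE and
 * struct my_msg are hypothetical.
 */
#if 0
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE,
			sizeof(struct my_msg), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
#endif
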
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701/*
2702 * It looks a bit ugly.
 2703 * It would be better to create a kernel thread.
2704 */
2705
2706static int netlink_dump(struct sock *sk)
2707{
2708 struct netlink_sock *nlk = nlk_sk(sk);
2709 struct netlink_callback *cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002710 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 struct nlmsghdr *nlh;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002712 int len, err = -ENOBUFS;
Greg Rosec7ac8672011-06-10 01:27:09 +00002713 int alloc_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002715 mutex_lock(nlk->cb_mutex);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002716 if (!nlk->cb_running) {
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002717 err = -EINVAL;
2718 goto errout_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719 }
2720
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002721 cb = &nlk->cb;
Greg Rosec7ac8672011-06-10 01:27:09 +00002722 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2723
Patrick McHardyf9c22882013-04-17 06:47:04 +00002724 if (!netlink_rx_is_mmaped(sk) &&
2725 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2726 goto errout_skb;
Eric Dumazet9063e212014-03-07 12:02:33 -08002727
 2728	/* NLMSG_GOODSIZE is small to avoid high-order allocations being
 2729	 * required, but it makes sense to _attempt_ a 16KB allocation
 2730	 * to reduce the number of system calls on dump operations, if the
 2731	 * user ever provided a big enough buffer.
2732 */
2733 if (alloc_size < nlk->max_recvmsg_len) {
2734 skb = netlink_alloc_skb(sk,
2735 nlk->max_recvmsg_len,
2736 nlk->portid,
2737 GFP_KERNEL |
2738 __GFP_NOWARN |
2739 __GFP_NORETRY);
 2740		/* the available room should be the exact amount to avoid MSG_TRUNC */
2741 if (skb)
2742 skb_reserve(skb, skb_tailroom(skb) -
2743 nlk->max_recvmsg_len);
2744 }
2745 if (!skb)
2746 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2747 GFP_KERNEL);
Greg Rosec7ac8672011-06-10 01:27:09 +00002748 if (!skb)
Dan Carpenterc63d6ea2011-06-15 03:11:42 +00002749 goto errout_skb;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002750 netlink_skb_set_owner_r(skb, sk);
Greg Rosec7ac8672011-06-10 01:27:09 +00002751
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 len = cb->dump(skb, cb);
2753
2754 if (len > 0) {
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002755 mutex_unlock(nlk->cb_mutex);
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002756
2757 if (sk_filter(sk, skb))
2758 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002759 else
2760 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 return 0;
2762 }
2763
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002764 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2765 if (!nlh)
2766 goto errout_skb;
2767
Johannes Berg670dc282011-06-20 13:40:46 +02002768 nl_dump_check_consistent(cb, nlh);
2769
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002770 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2771
Stephen Hemmingerb1153f22008-03-21 15:46:12 -07002772 if (sk_filter(sk, skb))
2773 kfree_skb(skb);
Eric Dumazet4a7e7c22012-04-05 22:17:46 +00002774 else
2775 __netlink_sendskb(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776
Thomas Grafa8f74b22005-11-10 02:25:52 +01002777 if (cb->done)
2778 cb->done(cb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002780 nlk->cb_running = false;
2781 mutex_unlock(nlk->cb_mutex);
Gao feng6dc878a2012-10-04 20:15:48 +00002782 module_put(cb->module);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002783 consume_skb(cb->skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 return 0;
Thomas Graf17977542005-06-18 22:53:48 -07002785
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002786errout_skb:
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002787 mutex_unlock(nlk->cb_mutex);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002788 kfree_skb(skb);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002789 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790}
2791
Gao feng6dc878a2012-10-04 20:15:48 +00002792int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2793 const struct nlmsghdr *nlh,
2794 struct netlink_dump_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795{
2796 struct netlink_callback *cb;
2797 struct sock *sk;
2798 struct netlink_sock *nlk;
Andrey Vaginb44d2112011-02-21 02:40:47 +00002799 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800
Patrick McHardyf9c22882013-04-17 06:47:04 +00002801 /* Memory mapped dump requests need to be copied to avoid looping
 2802	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
 2803	 * a reference to the skb.
2804 */
2805 if (netlink_skb_is_mmaped(skb)) {
2806 skb = skb_copy(skb, GFP_KERNEL);
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002807 if (skb == NULL)
Patrick McHardyf9c22882013-04-17 06:47:04 +00002808 return -ENOBUFS;
Patrick McHardyf9c22882013-04-17 06:47:04 +00002809 } else
2810 atomic_inc(&skb->users);
2811
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002812 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2813 if (sk == NULL) {
2814 ret = -ECONNREFUSED;
2815 goto error_free;
2816 }
2817
2818 nlk = nlk_sk(sk);
2819 mutex_lock(nlk->cb_mutex);
2820 /* A dump is in progress... */
2821 if (nlk->cb_running) {
2822 ret = -EBUSY;
2823 goto error_unlock;
2824 }
 2825	/* take a reference on the module that cb->dump belongs to */
2826 if (!try_module_get(control->module)) {
2827 ret = -EPROTONOSUPPORT;
2828 goto error_unlock;
2829 }
2830
2831 cb = &nlk->cb;
2832 memset(cb, 0, sizeof(*cb));
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002833 cb->dump = control->dump;
2834 cb->done = control->done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835 cb->nlh = nlh;
Pablo Neira Ayuso7175c882012-02-24 14:30:16 +00002836 cb->data = control->data;
Gao feng6dc878a2012-10-04 20:15:48 +00002837 cb->module = control->module;
Pablo Neira Ayuso80d326f2012-02-24 14:30:15 +00002838 cb->min_dump_alloc = control->min_dump_alloc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 cb->skb = skb;
2840
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002841 nlk->cb_running = true;
Gao feng6dc878a2012-10-04 20:15:48 +00002842
Patrick McHardyaf65bdf2007-04-20 14:14:21 -07002843 mutex_unlock(nlk->cb_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844
Andrey Vaginb44d2112011-02-21 02:40:47 +00002845 ret = netlink_dump(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 sock_put(sk);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002847
Andrey Vaginb44d2112011-02-21 02:40:47 +00002848 if (ret)
2849 return ret;
2850
Denis V. Lunev5c582982007-10-23 20:29:25 -07002851	/* We successfully started a dump; by returning -EINTR we
 2852	 * signal not to send an ACK even if it was requested.
2853 */
2854 return -EINTR;
Pravin B Shelar16b304f2013-08-15 15:31:06 -07002855
2856error_unlock:
2857 sock_put(sk);
2858 mutex_unlock(nlk->cb_mutex);
2859error_free:
2860 kfree_skb(skb);
2861 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862}
Gao feng6dc878a2012-10-04 20:15:48 +00002863EXPORT_SYMBOL(__netlink_dump_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864
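/*
 * Illustrative sketch only (not part of this file): a family's request
 * handler typically starts a dump through the netlink_dump_start() wrapper
 * around __netlink_dump_start() when NLM_F_DUMP is set.  my_dump() and
 * my_done() are hypothetical callbacks.
 */
#if 0
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = my_dump,	/* fills one skb per netlink_dump() pass */
			.done = my_done,	/* optional cleanup callback */
		};

		return netlink_dump_start(nls, skb, nlh, &c);
	}
#endif
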
2865void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2866{
2867 struct sk_buff *skb;
2868 struct nlmsghdr *rep;
2869 struct nlmsgerr *errmsg;
Thomas Graf339bf982006-11-10 14:10:15 -08002870 size_t payload = sizeof(*errmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871
Thomas Graf339bf982006-11-10 14:10:15 -08002872	/* error messages get the original request appended */
2873 if (err)
2874 payload += nlmsg_len(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875
Patrick McHardyf9c22882013-04-17 06:47:04 +00002876 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2877 NETLINK_CB(in_skb).portid, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878 if (!skb) {
2879 struct sock *sk;
2880
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002881 sk = netlink_lookup(sock_net(in_skb->sk),
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02002882 in_skb->sk->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002883 NETLINK_CB(in_skb).portid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884 if (sk) {
2885 sk->sk_err = ENOBUFS;
2886 sk->sk_error_report(sk);
2887 sock_put(sk);
2888 }
2889 return;
2890 }
2891
Eric W. Biederman15e47302012-09-07 20:12:54 +00002892 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
John Fastabend5dba93a2009-09-25 13:11:44 +00002893 NLMSG_ERROR, payload, 0);
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002894 errmsg = nlmsg_data(rep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895 errmsg->error = err;
Thomas Grafbf8b79e2006-08-04 23:03:29 -07002896 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
Eric W. Biederman15e47302012-09-07 20:12:54 +00002897 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002899EXPORT_SYMBOL(netlink_ack);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002901int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002902 struct nlmsghdr *))
Thomas Graf82ace472005-11-10 02:25:53 +01002903{
Thomas Graf82ace472005-11-10 02:25:53 +01002904 struct nlmsghdr *nlh;
2905 int err;
2906
2907 while (skb->len >= nlmsg_total_size(0)) {
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002908 int msglen;
2909
Arnaldo Carvalho de Melob529ccf2007-04-25 19:08:35 -07002910 nlh = nlmsg_hdr(skb);
Thomas Grafd35b6852007-03-22 23:28:46 -07002911 err = 0;
Thomas Graf82ace472005-11-10 02:25:53 +01002912
Martin Murrayad8e4b72006-01-10 13:02:29 -08002913 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
Thomas Graf82ace472005-11-10 02:25:53 +01002914 return 0;
2915
Thomas Grafd35b6852007-03-22 23:28:46 -07002916 /* Only requests are handled by the kernel */
2917 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
Denis V. Lunev5c582982007-10-23 20:29:25 -07002918 goto ack;
Thomas Grafd35b6852007-03-22 23:28:46 -07002919
Thomas Graf45e7ae72007-03-22 23:29:10 -07002920 /* Skip control messages */
2921 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
Denis V. Lunev5c582982007-10-23 20:29:25 -07002922 goto ack;
Thomas Graf45e7ae72007-03-22 23:29:10 -07002923
Thomas Graf1d00a4e2007-03-22 23:30:12 -07002924 err = cb(skb, nlh);
Denis V. Lunev5c582982007-10-23 20:29:25 -07002925 if (err == -EINTR)
2926 goto skip;
2927
2928ack:
Thomas Grafd35b6852007-03-22 23:28:46 -07002929 if (nlh->nlmsg_flags & NLM_F_ACK || err)
Thomas Graf82ace472005-11-10 02:25:53 +01002930 netlink_ack(skb, nlh, err);
Thomas Graf82ace472005-11-10 02:25:53 +01002931
Denis V. Lunev5c582982007-10-23 20:29:25 -07002932skip:
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002933 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
Denis V. Lunevcd40b7d2007-10-10 21:15:29 -07002934 if (msglen > skb->len)
2935 msglen = skb->len;
2936 skb_pull(skb, msglen);
Thomas Graf82ace472005-11-10 02:25:53 +01002937 }
2938
2939 return 0;
2940}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002941EXPORT_SYMBOL(netlink_rcv_skb);
Thomas Graf82ace472005-11-10 02:25:53 +01002942
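/*
 * Illustrative sketch only (not part of this file): rtnetlink-style families
 * point their cfg->input callback at a small wrapper around netlink_rcv_skb(),
 * which then calls back into the family once per request message.  my_rcv()
 * and my_rcv_msg() are hypothetical names.
 */
#if 0
static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* handle a single NLM_F_REQUEST message; return 0 or a -errno */
	return 0;
}

static void my_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &my_rcv_msg);
}
#endif
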
2943/**
Thomas Grafd387f6a2006-08-15 00:31:06 -07002944 * nlmsg_notify - send a notification netlink message
2945 * @sk: netlink socket to use
2946 * @skb: notification message
Eric W. Biederman15e47302012-09-07 20:12:54 +00002947 * @portid: destination netlink portid for reports or 0
Thomas Grafd387f6a2006-08-15 00:31:06 -07002948 * @group: destination multicast group or 0
2949 * @report: 1 to report back, 0 to disable
2950 * @flags: allocation flags
2951 */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002952int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
Thomas Grafd387f6a2006-08-15 00:31:06 -07002953 unsigned int group, int report, gfp_t flags)
2954{
2955 int err = 0;
2956
2957 if (group) {
Eric W. Biederman15e47302012-09-07 20:12:54 +00002958 int exclude_portid = 0;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002959
2960 if (report) {
2961 atomic_inc(&skb->users);
Eric W. Biederman15e47302012-09-07 20:12:54 +00002962 exclude_portid = portid;
Thomas Grafd387f6a2006-08-15 00:31:06 -07002963 }
2964
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002965		/* errors are reported via the destination sk->sk_err, but
 2966		 * delivery errors are propagated if NETLINK_BROADCAST_ERROR is set */
Eric W. Biederman15e47302012-09-07 20:12:54 +00002967 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
Thomas Grafd387f6a2006-08-15 00:31:06 -07002968 }
2969
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002970 if (report) {
2971 int err2;
2972
Eric W. Biederman15e47302012-09-07 20:12:54 +00002973 err2 = nlmsg_unicast(sk, skb, portid);
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08002974 if (!err || err == -ESRCH)
2975 err = err2;
2976 }
Thomas Grafd387f6a2006-08-15 00:31:06 -07002977
2978 return err;
2979}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08002980EXPORT_SYMBOL(nlmsg_notify);
Thomas Grafd387f6a2006-08-15 00:31:06 -07002981
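/*
 * Illustrative sketch only (not part of this file): a typical notifier lets
 * nlmsg_notify() fan a freshly built skb out to a multicast group and, when
 * the original request carried NLM_F_ECHO, unicast it back to the requester.
 * MY_GRP_EVENT and the local variables are hypothetical.
 */
#if 0
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int report = nlh ? !!(nlh->nlmsg_flags & NLM_F_ECHO) : 0;

	err = nlmsg_notify(nls, skb, portid, MY_GRP_EVENT, report, GFP_KERNEL);
#endif
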
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982#ifdef CONFIG_PROC_FS
2983struct nl_seq_iter {
Denis V. Luneve372c412007-11-19 22:31:54 -08002984 struct seq_net_private p;
Herbert Xu56d28b12015-02-04 07:33:24 +11002985 struct rhashtable_iter hti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 int link;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987};
2988
Herbert Xu56d28b12015-02-04 07:33:24 +11002989static int netlink_walk_start(struct nl_seq_iter *iter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990{
Herbert Xu56d28b12015-02-04 07:33:24 +11002991 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992
Herbert Xu56d28b12015-02-04 07:33:24 +11002993 err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
2994 if (err) {
2995 iter->link = MAX_LINKS;
2996 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997 }
Herbert Xu56d28b12015-02-04 07:33:24 +11002998
2999 err = rhashtable_walk_start(&iter->hti);
3000 return err == -EAGAIN ? 0 : err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001}
3002
Herbert Xu56d28b12015-02-04 07:33:24 +11003003static void netlink_walk_stop(struct nl_seq_iter *iter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004{
Herbert Xu56d28b12015-02-04 07:33:24 +11003005 rhashtable_walk_stop(&iter->hti);
3006 rhashtable_walk_exit(&iter->hti);
3007}
3008
3009static void *__netlink_seq_next(struct seq_file *seq)
3010{
3011 struct nl_seq_iter *iter = seq->private;
3012 struct netlink_sock *nlk;
3013
3014 do {
3015 for (;;) {
3016 int err;
3017
3018 nlk = rhashtable_walk_next(&iter->hti);
3019
3020 if (IS_ERR(nlk)) {
3021 if (PTR_ERR(nlk) == -EAGAIN)
3022 continue;
3023
3024 return nlk;
3025 }
3026
3027 if (nlk)
3028 break;
3029
3030 netlink_walk_stop(iter);
3031 if (++iter->link >= MAX_LINKS)
3032 return NULL;
3033
3034 err = netlink_walk_start(iter);
3035 if (err)
3036 return ERR_PTR(err);
3037 }
3038 } while (sock_net(&nlk->sk) != seq_file_net(seq));
3039
3040 return nlk;
3041}
3042
3043static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
3044{
3045 struct nl_seq_iter *iter = seq->private;
3046 void *obj = SEQ_START_TOKEN;
3047 loff_t pos;
3048 int err;
3049
3050 iter->link = 0;
3051
3052 err = netlink_walk_start(iter);
3053 if (err)
3054 return ERR_PTR(err);
3055
3056 for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
3057 obj = __netlink_seq_next(seq);
3058
3059 return obj;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060}
3061
3062static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3063{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064 ++*pos;
Herbert Xu56d28b12015-02-04 07:33:24 +11003065 return __netlink_seq_next(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066}
3067
3068static void netlink_seq_stop(struct seq_file *seq, void *v)
3069{
Herbert Xu56d28b12015-02-04 07:33:24 +11003070 struct nl_seq_iter *iter = seq->private;
3071
3072 if (iter->link >= MAX_LINKS)
3073 return;
3074
3075 netlink_walk_stop(iter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076}
3077
3078
3079static int netlink_seq_show(struct seq_file *seq, void *v)
3080{
Eric Dumazet658cb352012-04-22 21:30:21 +00003081 if (v == SEQ_START_TOKEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003082 seq_puts(seq,
3083 "sk Eth Pid Groups "
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003084 "Rmem Wmem Dump Locks Drops Inode\n");
Eric Dumazet658cb352012-04-22 21:30:21 +00003085 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086 struct sock *s = v;
3087 struct netlink_sock *nlk = nlk_sk(s);
3088
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003089 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090 s,
3091 s->sk_protocol,
Eric W. Biederman15e47302012-09-07 20:12:54 +00003092 nlk->portid,
Patrick McHardy513c2502005-09-06 15:43:59 -07003093 nlk->groups ? (u32)nlk->groups[0] : 0,
Eric Dumazet31e6d362009-06-17 19:05:41 -07003094 sk_rmem_alloc_get(s),
3095 sk_wmem_alloc_get(s),
Pravin B Shelar16b304f2013-08-15 15:31:06 -07003096 nlk->cb_running,
Pablo Neira Ayuso38938bf2009-03-24 16:37:55 -07003097 atomic_read(&s->sk_refcnt),
Masatake YAMATOcf0aa4e2010-02-27 19:45:37 +00003098 atomic_read(&s->sk_drops),
3099 sock_i_ino(s)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100 );
3101
3102 }
3103 return 0;
3104}
3105
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003106static const struct seq_operations netlink_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003107 .start = netlink_seq_start,
3108 .next = netlink_seq_next,
3109 .stop = netlink_seq_stop,
3110 .show = netlink_seq_show,
3111};
3112
3113
3114static int netlink_seq_open(struct inode *inode, struct file *file)
3115{
Denis V. Luneve372c412007-11-19 22:31:54 -08003116 return seq_open_net(inode, file, &netlink_seq_ops,
3117 sizeof(struct nl_seq_iter));
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003118}
3119
Arjan van de Venda7071d2007-02-12 00:55:36 -08003120static const struct file_operations netlink_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121 .owner = THIS_MODULE,
3122 .open = netlink_seq_open,
3123 .read = seq_read,
3124 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003125 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126};
3127
3128#endif
3129
3130int netlink_register_notifier(struct notifier_block *nb)
3131{
Alan Sterne041c682006-03-27 01:16:30 -08003132 return atomic_notifier_chain_register(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003134EXPORT_SYMBOL(netlink_register_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135
3136int netlink_unregister_notifier(struct notifier_block *nb)
3137{
Alan Sterne041c682006-03-27 01:16:30 -08003138 return atomic_notifier_chain_unregister(&netlink_chain, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003139}
Patrick McHardy6ac552f2007-12-04 00:19:38 -08003140EXPORT_SYMBOL(netlink_unregister_notifier);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003141
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003142static const struct proto_ops netlink_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143 .family = PF_NETLINK,
3144 .owner = THIS_MODULE,
3145 .release = netlink_release,
3146 .bind = netlink_bind,
3147 .connect = netlink_connect,
3148 .socketpair = sock_no_socketpair,
3149 .accept = sock_no_accept,
3150 .getname = netlink_getname,
Patrick McHardy9652e932013-04-17 06:47:02 +00003151 .poll = netlink_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152 .ioctl = sock_no_ioctl,
3153 .listen = sock_no_listen,
3154 .shutdown = sock_no_shutdown,
Patrick McHardy9a4595b2005-08-15 12:32:15 -07003155 .setsockopt = netlink_setsockopt,
3156 .getsockopt = netlink_getsockopt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157 .sendmsg = netlink_sendmsg,
3158 .recvmsg = netlink_recvmsg,
Patrick McHardyccdfcc32013-04-17 06:47:01 +00003159 .mmap = netlink_mmap,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160 .sendpage = sock_no_sendpage,
3161};
3162
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003163static const struct net_proto_family netlink_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164 .family = PF_NETLINK,
3165 .create = netlink_create,
3166 .owner = THIS_MODULE, /* for consistency 8) */
3167};
3168
Pavel Emelyanov46650792007-10-08 20:38:39 -07003169static int __net_init netlink_net_init(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003170{
3171#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00003172 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003173 return -ENOMEM;
3174#endif
3175 return 0;
3176}
3177
Pavel Emelyanov46650792007-10-08 20:38:39 -07003178static void __net_exit netlink_net_exit(struct net *net)
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003179{
3180#ifdef CONFIG_PROC_FS
Gao fengece31ff2013-02-18 01:34:56 +00003181 remove_proc_entry("netlink", net->proc_net);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003182#endif
3183}
3184
David S. Millerb963ea82010-08-30 19:08:01 -07003185static void __init netlink_add_usersock_entry(void)
3186{
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003187 struct listeners *listeners;
David S. Millerb963ea82010-08-30 19:08:01 -07003188 int groups = 32;
3189
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003190 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
David S. Millerb963ea82010-08-30 19:08:01 -07003191 if (!listeners)
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003192 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
David S. Millerb963ea82010-08-30 19:08:01 -07003193
3194 netlink_table_grab();
3195
3196 nl_table[NETLINK_USERSOCK].groups = groups;
Eric Dumazet5c398dc2010-10-24 04:27:10 +00003197 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
David S. Millerb963ea82010-08-30 19:08:01 -07003198 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3199 nl_table[NETLINK_USERSOCK].registered = 1;
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00003200 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
David S. Millerb963ea82010-08-30 19:08:01 -07003201
3202 netlink_table_ungrab();
3203}
3204
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003205static struct pernet_operations __net_initdata netlink_net_ops = {
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003206 .init = netlink_net_init,
3207 .exit = netlink_net_exit,
3208};
3209
Patrick McHardy49f7b332015-03-25 13:07:45 +00003210static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
Herbert Xuc428ecd2015-03-20 21:57:01 +11003211{
3212 const struct netlink_sock *nlk = data;
3213 struct netlink_compare_arg arg;
3214
3215 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
Herbert Xu11b58ba2015-03-24 00:50:22 +11003216 return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
Herbert Xuc428ecd2015-03-20 21:57:01 +11003217}
3218
3219static const struct rhashtable_params netlink_rhashtable_params = {
3220 .head_offset = offsetof(struct netlink_sock, node),
3221 .key_len = netlink_compare_arg_len,
Herbert Xuc428ecd2015-03-20 21:57:01 +11003222 .obj_hashfn = netlink_hash,
3223 .obj_cmpfn = netlink_compare,
Thomas Grafb5e2c152015-03-24 20:42:19 +00003224 .automatic_shrinking = true,
Herbert Xuc428ecd2015-03-20 21:57:01 +11003225};
3226
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227static int __init netlink_proto_init(void)
3228{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230 int err = proto_register(&netlink_proto, 0);
3231
3232 if (err != 0)
3233 goto out;
3234
YOSHIFUJI Hideaki / 吉藤英明fab25742013-01-09 07:19:48 +00003235 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236
Panagiotis Issaris0da974f2006-07-21 14:51:30 -07003237 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003238 if (!nl_table)
3239 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241 for (i = 0; i < MAX_LINKS; i++) {
Herbert Xuc428ecd2015-03-20 21:57:01 +11003242 if (rhashtable_init(&nl_table[i].hash,
3243 &netlink_rhashtable_params) < 0) {
Thomas Grafe3416942014-08-02 11:47:45 +02003244 while (--i > 0)
3245 rhashtable_destroy(&nl_table[i].hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246 kfree(nl_table);
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003247 goto panic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249 }
3250
Daniel Borkmannbcbde0d2013-06-21 19:38:07 +02003251 INIT_LIST_HEAD(&netlink_tap_all);
3252
David S. Millerb963ea82010-08-30 19:08:01 -07003253 netlink_add_usersock_entry();
3254
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255 sock_register(&netlink_family_ops);
Eric W. Biedermanb4b51022007-09-12 13:05:38 +02003256 register_pernet_subsys(&netlink_net_ops);
YOSHIFUJI Hideaki746fac42007-02-09 23:25:07 +09003257 /* The netlink device handler may be needed early. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003258 rtnetlink_init();
3259out:
3260 return err;
Akinobu Mitafab2caf2006-08-29 02:15:24 -07003261panic:
3262 panic("netlink_init: Cannot allocate nl_table\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263}
3264
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265core_initcall(netlink_proto_init);