blob: a9b5ebc1af2106cb12baaaae014f4d9b05430e1b [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Routing netlink socket interface: protocol independent part.
7 *
8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 * Fixes:
16 * Vitaly E. Lavrov RTA_OK arithmetics was wrong.
17 */
18
Jakub Kicinskiee5d0322017-06-21 18:25:04 -070019#include <linux/bitops.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <linux/errno.h>
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/socket.h>
24#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025#include <linux/timer.h>
26#include <linux/string.h>
27#include <linux/sockios.h>
28#include <linux/net.h>
29#include <linux/fcntl.h>
30#include <linux/mm.h>
31#include <linux/slab.h>
32#include <linux/interrupt.h>
33#include <linux/capability.h>
34#include <linux/skbuff.h>
35#include <linux/init.h>
36#include <linux/security.h>
Stephen Hemminger6756ae42006-03-20 22:23:58 -080037#include <linux/mutex.h>
Thomas Graf18237302006-08-04 23:04:54 -070038#include <linux/if_addr.h>
John Fastabend77162022012-04-15 06:43:56 +000039#include <linux/if_bridge.h>
Jiri Pirkof6f64242014-11-28 14:34:15 +010040#include <linux/if_vlan.h>
Williams, Mitch Aebc08a62010-02-10 01:44:05 +000041#include <linux/pci.h>
John Fastabend77162022012-04-15 06:43:56 +000042#include <linux/etherdevice.h>
Martin KaFai Lau58038692017-06-15 17:29:09 -070043#include <linux/bpf.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080045#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
47#include <linux/inet.h>
48#include <linux/netdevice.h>
Jiri Pirko82f28412014-11-28 14:34:18 +010049#include <net/switchdev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <net/ip.h>
51#include <net/protocol.h>
52#include <net/arp.h>
53#include <net/route.h>
54#include <net/udp.h>
Daniel Borkmannea697632015-01-05 23:57:47 +010055#include <net/tcp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070056#include <net/sock.h>
57#include <net/pkt_sched.h>
Thomas Graf14c0b972006-08-04 03:38:38 -070058#include <net/fib_rules.h>
Thomas Grafe2849862007-03-22 11:48:11 -070059#include <net/rtnetlink.h>
Johannes Berg30ffee82009-07-10 09:51:35 +000060#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070061
Eric Dumazete0d087a2009-11-07 01:26:17 -080062struct rtnl_link {
Thomas Grafe2849862007-03-22 11:48:11 -070063 rtnl_doit_func doit;
64 rtnl_dumpit_func dumpit;
Florian Westphal62256f92017-08-09 20:41:52 +020065 unsigned int flags;
Thomas Grafe2849862007-03-22 11:48:11 -070066};
67
Stephen Hemminger6756ae42006-03-20 22:23:58 -080068static DEFINE_MUTEX(rtnl_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -070069
/* Acquire the global RTNL mutex that serializes all rtnetlink operations. */
void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070075
/* skbs queued here are freed (with cond_resched()) in __rtnl_unlock().
 * Only ever touched with the RTNL mutex held.
 */
static struct sk_buff *defer_kfree_skb_list;

/**
 * rtnl_kfree_skbs - defer freeing a chain of skbs until RTNL is dropped
 * @head: first skb of the singly linked segment (may be NULL)
 * @tail: last skb of the segment; its ->next pointer is overwritten
 *
 * Caller must hold the RTNL mutex. Nothing is queued unless both
 * @head and @tail are non-NULL.
 */
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		/* splice the segment onto the front of the deferred list */
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);
85
/* Drop the RTNL mutex, then free any skbs deferred via rtnl_kfree_skbs().
 * The deferred list is snapshotted and cleared while still holding the
 * mutex, so deferrals made under a later lock hold cannot race with this
 * drain.
 */
void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();	/* list can be long; stay preemption-friendly */
		head = next;
	}
}
Stephen Hemminger6756ae42006-03-20 22:23:58 -0800102
/* Release the RTNL mutex. netdev_run_todo() performs the actual unlock
 * after flushing pending netdevice unregistration work.
 */
void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109
/* Try to take the RTNL mutex without sleeping; nonzero on success. */
int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);
Stephen Hemminger6756ae42006-03-20 22:23:58 -0800115
/* Nonzero when some task currently holds the RTNL mutex. */
int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);
Patrick McHardyc9c10142008-04-23 22:10:48 -0700121
#ifdef CONFIG_PROVE_LOCKING
/* Lockdep helper: true when the current context holds the RTNL mutex.
 * Only built when lock proving is enabled.
 */
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
129
Florian Westphal6853dd42017-08-09 20:41:51 +0200130static struct rtnl_link __rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
Florian Westphal019a3162017-08-09 20:41:49 +0200131static refcount_t rtnl_msg_handlers_ref[RTNL_FAMILY_MAX + 1];
Thomas Grafe2849862007-03-22 11:48:11 -0700132
/* Map an RTM_* message type onto its index in a per-protocol handler
 * table (0 .. RTM_NR_MSGTYPES-1). BUG()s on out-of-range input.
 */
static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}
146
/**
 * __rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_register(int protocol, int msgtype,
		    rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		    unsigned int flags)
{
	struct rtnl_link *tab;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	/* raw dereference: presumably registration is serialized by the
	 * callers (init time), so no RCU read lock is taken here --
	 * TODO(review): confirm the locking contract for callers
	 */
	tab = rcu_dereference_raw(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
		if (tab == NULL)
			return -ENOBUFS;

		/* publish the zeroed table before filling the slot */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	if (doit)
		tab[msgindex].doit = doit;
	if (dumpit)
		tab[msgindex].dumpit = dumpit;
	tab[msgindex].flags |= flags;

	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_register);
193
/**
 * rtnl_register - Register a rtnetlink message type
 *
 * Identical to __rtnl_register() but panics on failure. This is useful
 * as failure of this function is very unlikely, it can only happen due
 * to lack of memory when allocating the chain to store all message
 * handlers for a protocol. Meant for use in init functions where lack
 * of memory implies no sense in continuing.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	if (__rtnl_register(protocol, msgtype, doit, dumpit, flags) < 0)
		panic("Unable to register rtnetlink message handler, "
		      "protocol = %d, message type = %d\n",
		      protocol, msgtype);
}
EXPORT_SYMBOL_GPL(rtnl_register);
213
/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code (-ENOENT when no
 * handler table exists for @protocol).
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link *handlers;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!handlers) {
		rtnl_unlock();
		return -ENOENT;
	}

	/* cleared under RTNL; the table itself stays allocated */
	handlers[msgindex].doit = NULL;
	handlers[msgindex].dumpit = NULL;
	handlers[msgindex].flags = 0;
	rtnl_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);
244
/**
 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family. Also frees the handler table.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link *handlers;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
	rtnl_unlock();

	/* wait for in-flight RCU readers of the handler table */
	synchronize_net();

	/* busy-wait until no caller still holds a reference on the table */
	while (refcount_read(&rtnl_msg_handlers_ref[protocol]) > 0)
		schedule();
	kfree(handlers);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270
Patrick McHardy38f7b872007-06-13 12:03:51 -0700271static LIST_HEAD(link_ops);
272
Eric Dumazetc63044f2011-12-13 11:38:00 +0000273static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
274{
275 const struct rtnl_link_ops *ops;
276
277 list_for_each_entry(ops, &link_ops, list) {
278 if (!strcmp(ops->kind, kind))
279 return ops;
280 }
281 return NULL;
282}
283
/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code (-EEXIST if the kind
 * is already registered).
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if (ops->setup && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);
311
/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Locked wrapper around __rtnl_link_register().
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
328
/* Queue for deletion every device in @net created through @ops, then
 * unregister them all in one batch. Called with RTNL held (see
 * __rtnl_link_unregister()).
 */
static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}
340
/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex. Also destroys every device of
 * this kind in every network namespace.
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
357
/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	/* register on the wait queue BEFORE checking, so a wakeup between
	 * the check and the sleep is not lost
	 */
	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		/* scan every namespace for devices still unregistering */
		for_each_net(net) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;	/* exit loop with RTNL still held */
		/* drop RTNL so the pending unregistrations can progress */
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
385
/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * Takes net_mutex and RTNL (via rtnl_lock_unregistering_all()), in that
 * order, before tearing down all devices of this kind.
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with cleanup_net() */
	mutex_lock(&net_mutex);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);
400
/* Attribute space needed for IFLA_INFO_SLAVE_DATA of @dev, as reported
 * by the rtnl_link_ops of its master device. Returns 0 when @dev has no
 * master, or the master's ops do not provide get_slave_size().
 */
static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		return 0;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	return nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);
}
416
/* Worst-case netlink attribute space needed for the IFLA_LINKINFO nest
 * of @dev: kind string, optional driver data, xstats and slave info.
 * Returns 0 for devices without rtnl_link_ops.
 */
static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}
441
Thomas Graff8ff1822010-11-16 04:30:14 +0000442static LIST_HEAD(rtnl_af_ops);
443
444static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
445{
446 const struct rtnl_af_ops *ops;
447
448 list_for_each_entry(ops, &rtnl_af_ops, list) {
449 if (ops->family == family)
450 return ops;
451 }
452
453 return NULL;
454}
455
/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 *
 * Appends @ops to the global per-address-family ops list under RTNL;
 * cannot fail.
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
469
/**
 * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
 */
void __rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
481
/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 *
 * Locked wrapper around __rtnl_af_unregister().
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	__rtnl_af_unregister(ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
493
/* Attribute space needed for the IFLA_AF_SPEC nest of @dev: the outer
 * container plus one nested entry for every registered address family
 * that implements get_link_af_size().
 */
static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}

	return size;
}
513
Jiri Pirkoba7d49b2014-01-22 09:05:55 +0100514static bool rtnl_have_link_slave_info(const struct net_device *dev)
515{
516 struct net_device *master_dev;
517
518 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
Jiri Pirko813f0202014-01-23 19:19:21 +0100519 if (master_dev && master_dev->rtnl_link_ops)
Jiri Pirkoba7d49b2014-01-22 09:05:55 +0100520 return true;
521 return false;
522}
523
/* Emit IFLA_INFO_SLAVE_KIND and, when the master's ops provide
 * fill_slave_info(), a nested IFLA_INFO_SLAVE_DATA for @dev.
 * Returns 0 when @dev has no master / no ops (nothing to emit),
 * -EMSGSIZE when the skb runs out of room, or the callback's error.
 */
static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	/* undo the partially written nest so the skb stays consistent */
	nla_nest_cancel(skb, slave_data);
	return err;
}
555
/* Emit IFLA_INFO_KIND plus optional driver xstats and a nested
 * IFLA_INFO_DATA for @dev's own rtnl_link_ops. Returns 0 when the
 * device has no ops, -EMSGSIZE on skb overflow, or a callback's error.
 */
static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	/* undo the partially written nest so the skb stays consistent */
	nla_nest_cancel(skb, data);
	return err;
}
587
/* Emit the full IFLA_LINKINFO nest (own link info plus slave info) for
 * @dev. On any failure the whole nest is cancelled and the error
 * (typically -EMSGSIZE) is returned.
 */
static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}
613
/* Broadcast @skb to multicast @group on @net's rtnetlink socket. When
 * @echo is set, also unicast the message back to @pid and return that
 * unicast's result; otherwise returns 0 regardless of broadcast outcome.
 */
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;
	int err = 0;

	NETLINK_CB(skb).dst_group = group;
	if (echo)
		/* keep the skb alive for the unicast after broadcast */
		refcount_inc(&skb->users);
	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
	if (echo)
		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
	return err;
}
627
/* Unicast @skb to netlink port @pid on @net's rtnetlink socket. */
int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);
Thomas Graf2942e902006-08-15 00:30:25 -0700635
/* Notify multicast @group of @skb on @net's rtnetlink socket. The
 * echo/report flag is derived from the triggering request header @nlh
 * (when given) via nlmsg_report().
 */
void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;
	int report = 0;

	if (nlh)
		report = nlmsg_report(nlh);

	nlmsg_notify(rtnl, skb, pid, group, report, flags);
}
EXPORT_SYMBOL(rtnl_notify);
Thomas Graf97676b62006-08-15 00:31:41 -0700648
/* Report error @error to all rtnetlink listeners of multicast @group. */
void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);
Thomas Graf97676b62006-08-15 00:31:41 -0700656
/* Dump the RTAX_* metrics array into an RTA_METRICS nest. Zero-valued
 * metrics are skipped; if none end up emitted the nest is cancelled and
 * 0 is returned. The attribute type is i + 1 because metrics[] is
 * 0-based while RTAX_* values start at 1.
 */
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	mx = nla_nest_start(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				/* stored as a key; exported as the
				 * congestion-control algorithm name
				 */
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				/* only user-visible feature bits are dumped */
				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		/* nothing emitted: drop the empty nest */
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704
/* Emit an RTA_CACHEINFO attribute for @dst. @expires is in jiffies
 * relative to now; a negative value (already expired) is reported as a
 * negative clock_t. The magnitude is clamped to INT_MAX clock ticks.
 */
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
		.rta_used = dst->__use,
		.rta_clntref = atomic_read(&(dst->__refcnt)),
		.rta_error = error,
		.rta_id = id,
	};

	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726
/* Apply an operstate transition (IF_OPER_UP / IF_OPER_DORMANT) to @dev;
 * other transition values are ignored. netdev_state_change() only fires
 * when the effective state actually changes.
 */
static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		/* UP only overrides DORMANT/UNKNOWN, and never while the
		 * device itself still claims to be dormant
		 */
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_DORMANT:
		if (operstate == IF_OPER_UP ||
		    operstate == IF_OPER_UNKNOWN)
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		/* operstate writes are serialized by dev_base_lock */
		write_lock_bh(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock_bh(&dev_base_lock);
		netdev_state_change(dev);
	}
}
753
/* Device flags as reported to userspace: dev->flags, except that
 * IFF_PROMISC and IFF_ALLMULTI are taken from dev->gflags (the
 * user-requested view of those two bits).
 */
static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}
759
/* Merge the flags requested in @ifm into @dev's current flags: only the
 * bits set in ifi_change are taken from the request, all other bits keep
 * the device's current userspace-visible value.
 */
static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}
772
/* Copy 64-bit device statistics (@b) into the legacy struct
 * rtnl_link_stats (@a), field by field.
 * NOTE(review): the destination struct presumably uses narrower fields,
 * so large counters would be truncated -- inherent to the legacy ABI;
 * confirm against the struct definitions.
 */
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}
806
/* Worst-case attribute space needed for the IFLA_VFINFO_LIST nest:
 * one IFLA_VF_INFO nest per VF, each carrying the per-VF config
 * attributes plus the IFLA_VF_VLAN_LIST and IFLA_VF_STATS nests.
 * Returns 0 when the device has no parent or the caller did not
 * request VF info via RTEXT_FILTER_VF.
 */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0); /* nest IFLA_VFINFO_LIST */
		size += num_vfs *
			(nla_total_size(0) + /* nest IFLA_VF_INFO */
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(0) + /* nest IFLA_VF_STATS */
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}
844
David Gibsonc53864f2014-04-24 10:22:36 +1000845static size_t rtnl_port_size(const struct net_device *dev,
846 u32 ext_filter_mask)
Scott Feldman57b61082010-05-17 22:49:55 -0700847{
848 size_t port_size = nla_total_size(4) /* PORT_VF */
849 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
Scott Feldman57b61082010-05-17 22:49:55 -0700850 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
851 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
852 + nla_total_size(1) /* PROT_VDP_REQUEST */
853 + nla_total_size(2); /* PORT_VDP_RESPONSE */
854 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
855 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
856 + port_size;
857 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
858 + port_size;
859
David Gibsonc53864f2014-04-24 10:22:36 +1000860 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
861 !(ext_filter_mask & RTEXT_FILTER_VF))
Scott Feldman57b61082010-05-17 22:49:55 -0700862 return 0;
863 if (dev_num_vf(dev->dev.parent))
864 return port_self_size + vf_ports_size +
865 vf_port_size * dev_num_vf(dev->dev.parent);
866 else
867 return port_self_size;
868}
869
David S. Millerb5cdae32017-04-18 15:36:58 -0400870static size_t rtnl_xdp_size(void)
Brenden Blancod1fdd912016-07-19 12:16:49 -0700871{
Sabrina Dubrocab3cfaa32016-11-15 11:16:35 +0100872 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
Martin KaFai Lau58038692017-06-15 17:29:09 -0700873 nla_total_size(1) + /* XDP_ATTACHED */
874 nla_total_size(4); /* XDP_PROG_ID */
Brenden Blancod1fdd912016-07-19 12:16:49 -0700875
David S. Millerb5cdae32017-04-18 15:36:58 -0400876 return xdp_size;
Brenden Blancod1fdd912016-07-19 12:16:49 -0700877}
878
/* Upper bound on the netlink message size needed to describe @dev in
 * one RTM_NEWLINK message; used by callers to size the skb.  The
 * per-attribute terms below must stay in sync with what
 * rtnl_fill_ifinfo() actually emits.  ext_filter_mask gates the
 * optional (and potentially large) VF-related terms.
 */
static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4) /* IFLA_EVENT */
	       + nla_total_size(1); /* IFLA_PROTO_DOWN */

}
921
Scott Feldman57b61082010-05-17 22:49:55 -0700922static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
923{
924 struct nlattr *vf_ports;
925 struct nlattr *vf_port;
926 int vf;
927 int err;
928
929 vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
930 if (!vf_ports)
931 return -EMSGSIZE;
932
933 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
934 vf_port = nla_nest_start(skb, IFLA_VF_PORT);
Scott Feldman8ca94182010-05-28 03:42:18 -0700935 if (!vf_port)
936 goto nla_put_failure;
David S. Millera6574342012-04-01 20:12:00 -0400937 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
938 goto nla_put_failure;
Scott Feldman57b61082010-05-17 22:49:55 -0700939 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
Scott Feldman8ca94182010-05-28 03:42:18 -0700940 if (err == -EMSGSIZE)
941 goto nla_put_failure;
Scott Feldman57b61082010-05-17 22:49:55 -0700942 if (err) {
Scott Feldman57b61082010-05-17 22:49:55 -0700943 nla_nest_cancel(skb, vf_port);
944 continue;
945 }
946 nla_nest_end(skb, vf_port);
947 }
948
949 nla_nest_end(skb, vf_ports);
950
951 return 0;
Scott Feldman8ca94182010-05-28 03:42:18 -0700952
953nla_put_failure:
954 nla_nest_cancel(skb, vf_ports);
955 return -EMSGSIZE;
Scott Feldman57b61082010-05-17 22:49:55 -0700956}
957
958static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
959{
960 struct nlattr *port_self;
961 int err;
962
963 port_self = nla_nest_start(skb, IFLA_PORT_SELF);
964 if (!port_self)
965 return -EMSGSIZE;
966
967 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
968 if (err) {
969 nla_nest_cancel(skb, port_self);
Scott Feldman8ca94182010-05-28 03:42:18 -0700970 return (err == -EMSGSIZE) ? err : 0;
Scott Feldman57b61082010-05-17 22:49:55 -0700971 }
972
973 nla_nest_end(skb, port_self);
974
975 return 0;
976}
977
David Gibsonc53864f2014-04-24 10:22:36 +1000978static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
979 u32 ext_filter_mask)
Scott Feldman57b61082010-05-17 22:49:55 -0700980{
981 int err;
982
David Gibsonc53864f2014-04-24 10:22:36 +1000983 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
984 !(ext_filter_mask & RTEXT_FILTER_VF))
Scott Feldman57b61082010-05-17 22:49:55 -0700985 return 0;
986
987 err = rtnl_port_self_fill(skb, dev);
988 if (err)
989 return err;
990
991 if (dev_num_vf(dev->dev.parent)) {
992 err = rtnl_vf_ports_fill(skb, dev);
993 if (err)
994 return err;
995 }
996
997 return 0;
998}
999
Jiri Pirko66cae9e2013-07-29 18:16:50 +02001000static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1001{
1002 int err;
Jiri Pirko02637fc2014-11-28 14:34:16 +01001003 struct netdev_phys_item_id ppid;
Jiri Pirko66cae9e2013-07-29 18:16:50 +02001004
1005 err = dev_get_phys_port_id(dev, &ppid);
1006 if (err) {
1007 if (err == -EOPNOTSUPP)
1008 return 0;
1009 return err;
1010 }
1011
1012 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1013 return -EMSGSIZE;
1014
1015 return 0;
1016}
1017
David Aherndb24a902015-03-17 20:23:15 -06001018static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1019{
1020 char name[IFNAMSIZ];
1021 int err;
1022
1023 err = dev_get_phys_port_name(dev, name, sizeof(name));
1024 if (err) {
1025 if (err == -EOPNOTSUPP)
1026 return 0;
1027 return err;
1028 }
1029
Michal Schmidt77ef0332017-05-04 16:48:58 +02001030 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
David Aherndb24a902015-03-17 20:23:15 -06001031 return -EMSGSIZE;
1032
1033 return 0;
1034}
1035
Jiri Pirko82f28412014-11-28 14:34:18 +01001036static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1037{
1038 int err;
Scott Feldmanf8e20a92015-05-10 09:47:49 -07001039 struct switchdev_attr attr = {
Ido Schimmel6ff64f62015-12-15 16:03:35 +01001040 .orig_dev = dev,
Jiri Pirko1f868392015-10-01 11:03:42 +02001041 .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
Scott Feldmanf8e20a92015-05-10 09:47:49 -07001042 .flags = SWITCHDEV_F_NO_RECURSE,
1043 };
Jiri Pirko82f28412014-11-28 14:34:18 +01001044
Scott Feldmanf8e20a92015-05-10 09:47:49 -07001045 err = switchdev_port_attr_get(dev, &attr);
Jiri Pirko82f28412014-11-28 14:34:18 +01001046 if (err) {
1047 if (err == -EOPNOTSUPP)
1048 return 0;
1049 return err;
1050 }
1051
Scott Feldman42275bd2015-05-13 11:16:50 -07001052 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
1053 attr.u.ppid.id))
Jiri Pirko82f28412014-11-28 14:34:18 +01001054 return -EMSGSIZE;
1055
1056 return 0;
1057}
1058
Hannes Frederic Sowab22b9412015-11-17 14:16:52 +01001059static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1060 struct net_device *dev)
1061{
Roopa Prabhu550bce52016-04-15 20:36:25 -07001062 struct rtnl_link_stats64 *sp;
Hannes Frederic Sowab22b9412015-11-17 14:16:52 +01001063 struct nlattr *attr;
1064
Nicolas Dichtel58414d32016-04-21 18:58:25 +02001065 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1066 sizeof(struct rtnl_link_stats64), IFLA_PAD);
Hannes Frederic Sowab22b9412015-11-17 14:16:52 +01001067 if (!attr)
1068 return -EMSGSIZE;
1069
Roopa Prabhu550bce52016-04-15 20:36:25 -07001070 sp = nla_data(attr);
1071 dev_get_stats(dev, sp);
1072
1073 attr = nla_reserve(skb, IFLA_STATS,
1074 sizeof(struct rtnl_link_stats));
1075 if (!attr)
1076 return -EMSGSIZE;
1077
1078 copy_rtnl_link_stats(nla_data(attr), sp);
Hannes Frederic Sowab22b9412015-11-17 14:16:52 +01001079
1080 return 0;
1081}
1082
/* Emit one IFLA_VF_INFO nest for VF @vfs_num inside the already-open
 * @vfinfo (IFLA_VFINFO_LIST) nest: per-VF config attributes, the
 * IFLA_VF_VLAN_LIST nest, and the IFLA_VF_STATS nest.  Returns 0 on
 * success or when the driver reports nothing for this VF; on
 * -EMSGSIZE the whole @vfinfo nest is cancelled for the caller.
 */
static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_info ivi;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query. Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	/* driver error here means "nothing to report for this VF" */
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));

	/* all per-VF attribute structs carry the same VF index */
	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;
	vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	/* stats are optional: zeroes are reported if the driver has
	 * no ndo_get_vf_stats
	 */
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						  &vf_stats);
	vfstats = nla_nest_start(skb, IFLA_VF_STATS);
	if (!vfstats)
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}
1205
/* Emit the IFLA_MAP attribute (legacy hardware resource info).
 * The memset is security-relevant: struct rtnl_link_ifmap has
 * padding/holes and its bytes are copied verbatim to userspace, so
 * everything must be zeroed before the fields are assigned (do not
 * replace with a designated initializer — that does not guarantee
 * zeroed padding).
 */
static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = dev->mem_start;
	map.mem_end = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.irq = dev->irq;
	map.dma = dev->dma;
	map.port = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}
1223
Martin KaFai Lau58038692017-06-15 17:29:09 -07001224static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02001225{
1226 const struct net_device_ops *ops = dev->netdev_ops;
Martin KaFai Lau58038692017-06-15 17:29:09 -07001227 const struct bpf_prog *generic_xdp_prog;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02001228
1229 ASSERT_RTNL();
1230
Martin KaFai Lau58038692017-06-15 17:29:09 -07001231 *prog_id = 0;
1232 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1233 if (generic_xdp_prog) {
1234 *prog_id = generic_xdp_prog->aux->id;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02001235 return XDP_ATTACHED_SKB;
Martin KaFai Lau58038692017-06-15 17:29:09 -07001236 }
Jakub Kicinskice158e52017-06-21 18:25:09 -07001237 if (!ops->ndo_xdp)
1238 return XDP_ATTACHED_NONE;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02001239
Jakub Kicinskice158e52017-06-21 18:25:09 -07001240 return __dev_xdp_attached(dev, ops->ndo_xdp, prog_id);
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02001241}
1242
Brenden Blancod1fdd912016-07-19 12:16:49 -07001243static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1244{
Brenden Blancod1fdd912016-07-19 12:16:49 -07001245 struct nlattr *xdp;
Martin KaFai Lau58038692017-06-15 17:29:09 -07001246 u32 prog_id;
Brenden Blancod1fdd912016-07-19 12:16:49 -07001247 int err;
1248
Brenden Blancod1fdd912016-07-19 12:16:49 -07001249 xdp = nla_nest_start(skb, IFLA_XDP);
1250 if (!xdp)
1251 return -EMSGSIZE;
David S. Millerb5cdae32017-04-18 15:36:58 -04001252
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02001253 err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
Martin KaFai Lau58038692017-06-15 17:29:09 -07001254 rtnl_xdp_attached_mode(dev, &prog_id));
Brenden Blancod1fdd912016-07-19 12:16:49 -07001255 if (err)
1256 goto err_cancel;
1257
Martin KaFai Lau58038692017-06-15 17:29:09 -07001258 if (prog_id) {
1259 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1260 if (err)
1261 goto err_cancel;
1262 }
1263
Brenden Blancod1fdd912016-07-19 12:16:49 -07001264 nla_nest_end(skb, xdp);
1265 return 0;
1266
1267err_cancel:
1268 nla_nest_cancel(skb, xdp);
1269 return err;
1270}
1271
Vlad Yasevich3d3ea5a2017-05-27 10:14:34 -04001272static u32 rtnl_get_event(unsigned long event)
1273{
1274 u32 rtnl_event_type = IFLA_EVENT_NONE;
1275
1276 switch (event) {
1277 case NETDEV_REBOOT:
1278 rtnl_event_type = IFLA_EVENT_REBOOT;
1279 break;
1280 case NETDEV_FEAT_CHANGE:
1281 rtnl_event_type = IFLA_EVENT_FEATURES;
1282 break;
1283 case NETDEV_BONDING_FAILOVER:
1284 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1285 break;
1286 case NETDEV_NOTIFY_PEERS:
1287 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1288 break;
1289 case NETDEV_RESEND_IGMP:
1290 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1291 break;
1292 case NETDEV_CHANGEINFODATA:
1293 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1294 break;
1295 default:
1296 break;
1297 }
1298
1299 return rtnl_event_type;
1300}
1301
/* Build one complete RTM_NEWLINK (or similar @type) message for @dev
 * into @skb: the ifinfomsg header followed by every IFLA_* attribute
 * the device supports, gated where applicable by @ext_filter_mask.
 * @event carries an IFLA_EVENT_* value (IFLA_EVENT_NONE to omit).
 * Must be called with the RTNL lock held.  Returns 0 or -EMSGSIZE;
 * on failure the partially-built message is cancelled.
 */
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct nlattr *af_spec;
	struct rtnl_af_ops *af_ops;
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	/* simple scalar/string attributes; optional ones are guarded
	 * inside the || chain
	 */
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
	    (upper_dev &&
	     nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (dev->qdisc &&
	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
	    (dev->ifalias &&
	     nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_changes)) ||
	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	/* VF information only when explicitly requested by the caller */
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
	    nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
	    ext_filter_mask & RTEXT_FILTER_VF) {
		int i;
		struct nlattr *vfinfo;
		int num_vfs = dev_num_vf(dev->dev.parent);

		vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
		if (!vfinfo)
			goto nla_put_failure;
		for (i = 0; i < num_vfs; i++) {
			if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
				goto nla_put_failure;
		}

		nla_nest_end(skb, vfinfo);
	}

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	/* report the peer netns id when the link's netns differs */
	if (dev->rtnl_link_ops &&
	    dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(dev_net(dev), link_net);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				goto nla_put_failure;
		}
	}

	if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
		goto nla_put_failure;

	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
		if (af_ops->fill_link_af) {
			struct nlattr *af;
			int err;

			if (!(af = nla_nest_start(skb, af_ops->family)))
				goto nla_put_failure;

			err = af_ops->fill_link_af(skb, dev, ext_filter_mask);

			/*
			 * Caller may return ENODATA to indicate that there
			 * was no data to be dumped. This is not an error, it
			 * means we should trim the attribute header and
			 * continue.
			 */
			if (err == -ENODATA)
				nla_nest_cancel(skb, af);
			else if (err < 0)
				goto nla_put_failure;

			nla_nest_end(skb, af);
		}
	}

	nla_nest_end(skb, af_spec);

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
1461
/* Validation policy for IFLA_* attributes received from userspace
 * (RTM_NEWLINK / RTM_SETLINK / RTM_GETLINK).  Attributes absent from
 * this table are accepted without type/length validation by the
 * nla_parse() machinery.
 */
static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	[IFLA_IFALIAS]	        = { .type = NLA_STRING, .len = IFALIASZ-1 },
	[IFLA_VFINFO_LIST]	= {. type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
};
1496
/* Policy for the IFLA_LINKINFO nested attribute: link kind names plus
 * kind-specific (and slave-kind-specific) nested configuration blobs.
 */
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
};
1503
/* Policy for the per-VF attributes nested inside IFLA_VFINFO_LIST /
 * IFLA_VF_INFO.  Fixed-size binary structs use .len only (no .type),
 * which enforces a minimum length.
 */
static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
	[IFLA_VF_VLAN_LIST]     = { .type = NLA_NESTED },
	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
};
1518
/* Policy for IFLA_PORT_* attributes (VF port profiles, nested under
 * IFLA_VF_PORTS / IFLA_PORT_SELF).
 */
static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
	[IFLA_PORT_VF]		= { .type = NLA_U32 },
	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
				    .len = PORT_PROFILE_MAX },
	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
				      .len = PORT_UUID_MAX },
	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
				    .len = PORT_UUID_MAX },
	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },

	/* Unused, but we need to keep it here since user space could
	 * fill it. It's also broken with regard to NLA_BINARY use in
	 * combination with structs.
	 */
	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
				    .len = sizeof(struct ifla_port_vsi) },
};
1537
/* Policy for the IFLA_XDP nested attribute.  IFLA_XDP_FD is the only
 * writable member; ATTACHED and PROG_ID are kernel-reported state and
 * are rejected on set (see do_setlink()).
 */
static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
	[IFLA_XDP_FD]		= { .type = NLA_S32 },
	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },
};
1544
David Aherndc599f72016-02-02 08:17:07 -08001545static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
1546{
1547 const struct rtnl_link_ops *ops = NULL;
1548 struct nlattr *linfo[IFLA_INFO_MAX + 1];
1549
Johannes Bergfceb6432017-04-12 14:34:07 +02001550 if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla,
1551 ifla_info_policy, NULL) < 0)
David Aherndc599f72016-02-02 08:17:07 -08001552 return NULL;
1553
1554 if (linfo[IFLA_INFO_KIND]) {
1555 char kind[MODULE_NAME_LEN];
1556
1557 nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
1558 ops = rtnl_link_ops_get(kind);
1559 }
1560
1561 return ops;
1562}
1563
1564static bool link_master_filtered(struct net_device *dev, int master_idx)
1565{
1566 struct net_device *master;
1567
1568 if (!master_idx)
1569 return false;
1570
1571 master = netdev_master_upper_dev_get(dev);
1572 if (!master || master->ifindex != master_idx)
1573 return true;
1574
1575 return false;
1576}
1577
1578static bool link_kind_filtered(const struct net_device *dev,
1579 const struct rtnl_link_ops *kind_ops)
1580{
1581 if (kind_ops && dev->rtnl_link_ops != kind_ops)
1582 return true;
1583
1584 return false;
1585}
1586
1587static bool link_dump_filtered(struct net_device *dev,
1588 int master_idx,
1589 const struct rtnl_link_ops *kind_ops)
1590{
1591 if (link_master_filtered(dev, master_idx) ||
1592 link_kind_filtered(dev, kind_ops))
1593 return true;
1594
1595 return false;
1596}
1597
/* Dump callback for RTM_GETLINK with NLM_F_DUMP: walk every net_device
 * in the requester's netns and emit one RTM_NEWLINK message per device.
 *
 * Resume state across skb's lives in cb->args[]: args[0] is the hash
 * bucket (h), args[1] the per-bucket index (idx) of the next device.
 * Optional request attributes narrow the dump: IFLA_EXT_MASK selects
 * extra info groups, IFLA_MASTER / IFLA_LINKINFO restrict by master
 * device or link kind (flagged back via NLM_F_DUMP_FILTERED).
 */
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int h, s_h;
	int idx = 0, s_idx;
	struct net_device *dev;
	struct hlist_head *head;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	const struct rtnl_link_ops *kind_ops = NULL;
	unsigned int flags = NLM_F_MULTI;
	int master_idx = 0;
	int err;
	int hdrlen;

	s_h = cb->args[0];
	s_idx = cb->args[1];

	/* Snapshot the generation counter so userspace can detect a
	 * changed device list across a multi-part dump.
	 */
	cb->seq = net->dev_base_seq;

	/* A hack to preserve kernel<->userspace interface.
	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
	 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
	 * what iproute2 < v3.9.0 used.
	 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
	 * attribute, its netlink message is shorter than struct ifinfomsg.
	 */
	hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	/* Parse failures are deliberately ignored: an unparsable request
	 * simply dumps everything with the default mask.
	 */
	if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX,
			ifla_policy, NULL) >= 0) {
		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

		if (tb[IFLA_MASTER])
			master_idx = nla_get_u32(tb[IFLA_MASTER]);

		if (tb[IFLA_LINKINFO])
			kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);

		if (master_idx || kind_ops)
			flags |= NLM_F_DUMP_FILTERED;
	}

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (link_dump_filtered(dev, master_idx, kind_ops))
				goto cont;
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
					       NETLINK_CB(cb->skb).portid,
					       cb->nlh->nlmsg_seq, 0,
					       flags,
					       ext_filter_mask, 0);

			if (err < 0) {
				/* skb full: return what we have and let
				 * the next call resume at (h, idx).  A
				 * failure on an empty skb is a real
				 * error and is propagated.
				 */
				if (likely(skb->len))
					goto out;

				goto out_err;
			}

			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	err = skb->len;
out_err:
	/* Persist resume position for the next invocation. */
	cb->args[1] = idx;
	cb->args[0] = h;

	return err;
}
1677
/* Parse a flat run of IFLA_* attributes at @head/@len into @tb using the
 * canonical ifla_policy.  Exported helper so other subsystems validate
 * IFLA attributes consistently with rtnetlink itself.
 */
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
			struct netlink_ext_ack *exterr)
{
	return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr);
}
EXPORT_SYMBOL(rtnl_nla_parse_ifla);
Scott Feldman57b61082010-05-17 22:49:55 -07001684
Eric W. Biederman81adee42009-11-08 00:53:51 -08001685struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
1686{
1687 struct net *net;
1688 /* Examine the link attributes and figure out which
1689 * network namespace we are talking about.
1690 */
1691 if (tb[IFLA_NET_NS_PID])
1692 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
Eric W. Biedermanf0630522011-05-04 17:51:50 -07001693 else if (tb[IFLA_NET_NS_FD])
1694 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
Eric W. Biederman81adee42009-11-08 00:53:51 -08001695 else
1696 net = get_net(src_net);
1697 return net;
1698}
1699EXPORT_SYMBOL(rtnl_link_get_net);
1700
Thomas Graf1840bb12008-02-23 19:54:36 -08001701static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
1702{
1703 if (dev) {
1704 if (tb[IFLA_ADDRESS] &&
1705 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
1706 return -EINVAL;
1707
1708 if (tb[IFLA_BROADCAST] &&
1709 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
1710 return -EINVAL;
1711 }
1712
Thomas Grafcf7afbf2010-11-22 01:31:54 +00001713 if (tb[IFLA_AF_SPEC]) {
1714 struct nlattr *af;
1715 int rem, err;
1716
1717 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
1718 const struct rtnl_af_ops *af_ops;
1719
1720 if (!(af_ops = rtnl_af_lookup(nla_type(af))))
1721 return -EAFNOSUPPORT;
1722
1723 if (!af_ops->set_link_af)
1724 return -EOPNOTSUPP;
1725
1726 if (af_ops->validate_link_af) {
Kurt Van Dijck6d3a9a62011-01-26 04:55:24 +00001727 err = af_ops->validate_link_af(dev, af);
Thomas Grafcf7afbf2010-11-22 01:31:54 +00001728 if (err < 0)
1729 return err;
1730 }
1731 }
1732 }
1733
Thomas Graf1840bb12008-02-23 19:54:36 -08001734 return 0;
1735}
1736
Eli Cohencc8e27c2016-03-11 22:58:34 +02001737static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
1738 int guid_type)
1739{
1740 const struct net_device_ops *ops = dev->netdev_ops;
1741
1742 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
1743}
1744
1745static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
1746{
1747 if (dev->type != ARPHRD_INFINIBAND)
1748 return -EOPNOTSUPP;
1749
1750 return handle_infiniband_guid(dev, ivt, guid_type);
1751}
1752
/* Apply one parsed IFLA_VF_INFO attribute set (@tb, indexed by
 * IFLA_VF_*) to a VF of @dev via the driver's ndo_set_vf_* hooks.
 *
 * Each present attribute is applied in order; the first failure aborts
 * with that error, so earlier changes may already have taken effect.
 * A present attribute whose ndo hook is missing yields -EOPNOTSUPP.
 * Returns the last ndo result (or -EINVAL if no attribute matched);
 * the GUID attributes return directly and terminate processing.
 */
static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err = -EINVAL;

	if (tb[IFLA_VF_MAC]) {
		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_mac)
			err = ops->ndo_set_vf_mac(dev, ivm->vf,
						  ivm->mac);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN]) {
		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);

		/* Legacy single-VLAN attribute: protocol fixed to 802.1Q. */
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_vlan)
			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
						   ivv->qos,
						   htons(ETH_P_8021Q));
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN_LIST]) {
		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
		struct nlattr *attr;
		int rem, len = 0;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_vlan)
			return err;

		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				return -EINVAL;
			}
			if (len >= MAX_VLAN_LIST_LEN)
				return -EOPNOTSUPP;
			ivvl[len] = nla_data(attr);

			len++;
		}
		if (len == 0)
			return -EINVAL;

		/* Only the first list entry is passed to the driver. */
		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
					   ivvl[0]->qos, ivvl[0]->vlan_proto);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TX_RATE]) {
		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
		struct ifla_vf_info ivf;

		/* Legacy max-rate attribute: fetch the current config so
		 * the existing min_tx_rate is preserved in the new call.
		 */
		err = -EOPNOTSUPP;
		if (ops->ndo_get_vf_config)
			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
		if (err < 0)
			return err;

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,
						   ivf.min_tx_rate,
						   ivt->rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RATE]) {
		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,
						   ivt->min_tx_rate,
						   ivt->max_tx_rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_SPOOFCHK]) {
		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_spoofchk)
			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
						       ivs->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_LINK_STATE]) {
		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_link_state)
			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
							 ivl->link_state);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RSS_QUERY_EN]) {
		struct ifla_vf_rss_query_en *ivrssq_en;

		err = -EOPNOTSUPP;
		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
		if (ops->ndo_set_vf_rss_query_en)
			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
							   ivrssq_en->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TRUST]) {
		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_trust)
			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_IB_NODE_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);

		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
	}

	if (tb[IFLA_VF_IB_PORT_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);

		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
	}

	return err;
}
1905
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001906static int do_set_master(struct net_device *dev, int ifindex)
1907{
Jiri Pirko898e5062013-01-03 22:48:52 +00001908 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001909 const struct net_device_ops *ops;
1910 int err;
1911
Jiri Pirko898e5062013-01-03 22:48:52 +00001912 if (upper_dev) {
1913 if (upper_dev->ifindex == ifindex)
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001914 return 0;
Jiri Pirko898e5062013-01-03 22:48:52 +00001915 ops = upper_dev->netdev_ops;
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001916 if (ops->ndo_del_slave) {
Jiri Pirko898e5062013-01-03 22:48:52 +00001917 err = ops->ndo_del_slave(upper_dev, dev);
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001918 if (err)
1919 return err;
1920 } else {
1921 return -EOPNOTSUPP;
1922 }
1923 }
1924
1925 if (ifindex) {
Jiri Pirko898e5062013-01-03 22:48:52 +00001926 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
1927 if (!upper_dev)
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001928 return -EINVAL;
Jiri Pirko898e5062013-01-03 22:48:52 +00001929 ops = upper_dev->netdev_ops;
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001930 if (ops->ndo_add_slave) {
Jiri Pirko898e5062013-01-03 22:48:52 +00001931 err = ops->ndo_add_slave(upper_dev, dev);
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001932 if (err)
1933 return err;
1934 } else {
1935 return -EOPNOTSUPP;
1936 }
1937 }
1938 return 0;
1939}
1940
/* Status bits threaded through do_setlink():
 * MODIFIED means at least one change was committed; NOTIFY additionally
 * requests a netdev_state_change() notification (it implies MODIFIED).
 */
#define DO_SETLINK_MODIFIED	0x01
/* notify flag means notify + modified. */
#define DO_SETLINK_NOTIFY	0x03
/* Apply parsed RTM_SETLINK/RTM_NEWLINK attributes (@tb) to @dev.
 *
 * Attributes are applied strictly in the order coded below; on the
 * first failure we jump to errout with earlier changes already
 * committed (the errout path warns about the resulting inconsistent
 * state when anything was modified).  @status carries DO_SETLINK_*
 * bits in from the caller and accumulates more as changes land.
 * Returns 0 or a negative errno.
 */
static int do_setlink(const struct sk_buff *skb,
		      struct net_device *dev, struct ifinfomsg *ifm,
		      struct netlink_ext_ack *extack,
		      struct nlattr **tb, char *ifname, int status)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	/* Namespace move first: requires CAP_NET_ADMIN in the target ns. */
	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
		struct net *net = rtnl_link_get_net(dev_net(dev), tb);
		if (IS_ERR(net)) {
			err = PTR_ERR(net);
			goto errout;
		}
		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
			put_net(net);
			err = -EPERM;
			goto errout;
		}
		err = dev_change_net_namespace(dev, net, ifname);
		put_net(net);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_MAP]) {
		struct rtnl_link_ifmap *u_map;
		struct ifmap k_map;

		if (!ops->ndo_set_config) {
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (!netif_device_present(dev)) {
			err = -ENODEV;
			goto errout;
		}

		/* Translate the netlink ifmap into the kernel's struct
		 * ifmap (narrowing each field as the driver API expects).
		 */
		u_map = nla_data(tb[IFLA_MAP]);
		k_map.mem_start = (unsigned long) u_map->mem_start;
		k_map.mem_end = (unsigned long) u_map->mem_end;
		k_map.base_addr = (unsigned short) u_map->base_addr;
		k_map.irq = (unsigned char) u_map->irq;
		k_map.dma = (unsigned char) u_map->dma;
		k_map.port = (unsigned char) u_map->port;

		err = ops->ndo_set_config(dev, &k_map);
		if (err < 0)
			goto errout;

		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_ADDRESS]) {
		struct sockaddr *sa;
		int len;

		/* Allocate enough for the larger of the device address
		 * and sockaddr's fixed sa_data.
		 */
		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
						  sizeof(*sa));
		sa = kmalloc(len, GFP_KERNEL);
		if (!sa) {
			err = -ENOMEM;
			goto errout;
		}
		sa->sa_family = dev->type;
		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
		       dev->addr_len);
		err = dev_set_mac_address(dev, sa);
		kfree(sa);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_GROUP]) {
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
		status |= DO_SETLINK_NOTIFY;
	}

	/*
	 * Interface selected by interface index but interface
	 * name provided implies that a name change has been
	 * requested.
	 */
	if (ifm->ifi_index > 0 && ifname[0]) {
		err = dev_change_name(dev, ifname);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_IFALIAS]) {
		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
				    nla_len(tb[IFLA_IFALIAS]));
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_BROADCAST]) {
		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	}

	if (ifm->ifi_flags || ifm->ifi_change) {
		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
		if (err < 0)
			goto errout;
	}

	if (tb[IFLA_MASTER]) {
		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_CARRIER]) {
		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_TXQLEN]) {
		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
		unsigned int orig_len = dev->tx_queue_len;

		/* Only touch the queue length if it actually changes;
		 * roll back if a notifier rejects the new value.
		 */
		if (dev->tx_queue_len ^ value) {
			dev->tx_queue_len = value;
			err = call_netdevice_notifiers(
			      NETDEV_CHANGE_TX_QUEUE_LEN, dev);
			err = notifier_to_errno(err);
			if (err) {
				dev->tx_queue_len = orig_len;
				goto errout;
			}
			status |= DO_SETLINK_NOTIFY;
		}
	}

	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));

	if (tb[IFLA_LINKMODE]) {
		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);

		/* link_mode is written under dev_base_lock; notify only
		 * when the value really changes.
		 */
		write_lock_bh(&dev_base_lock);
		if (dev->link_mode ^ value)
			status |= DO_SETLINK_NOTIFY;
		dev->link_mode = value;
		write_unlock_bh(&dev_base_lock);
	}

	if (tb[IFLA_VFINFO_LIST]) {
		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
					       ifla_vf_policy, NULL);
			if (err < 0)
				goto errout;
			err = do_setvfinfo(dev, vfinfo);
			if (err < 0)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}
	/* Reset err: a positive leftover from the loop must not leak. */
	err = 0;

	if (tb[IFLA_VF_PORTS]) {
		struct nlattr *port[IFLA_PORT_MAX+1];
		struct nlattr *attr;
		int vf;
		int rem;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_port)
			goto errout;

		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
			if (nla_type(attr) != IFLA_VF_PORT ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
					       ifla_port_policy, NULL);
			if (err < 0)
				goto errout;
			if (!port[IFLA_PORT_VF]) {
				err = -EOPNOTSUPP;
				goto errout;
			}
			vf = nla_get_u32(port[IFLA_PORT_VF]);
			err = ops->ndo_set_vf_port(dev, vf, port);
			if (err < 0)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_PORT_SELF]) {
		struct nlattr *port[IFLA_PORT_MAX+1];

		err = nla_parse_nested(port, IFLA_PORT_MAX,
				       tb[IFLA_PORT_SELF], ifla_port_policy,
				       NULL);
		if (err < 0)
			goto errout;

		/* PORT_SELF_VF targets the PF's own port profile. */
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_port)
			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			/* validate_linkmsg() already guaranteed a
			 * registered af_ops with set_link_af, hence BUG()
			 * if the lookup fails here.
			 */
			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
				BUG();

			err = af_ops->set_link_af(dev, af);
			if (err < 0)
				goto errout;

			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_PROTO_DOWN]) {
		err = dev_change_proto_down(dev,
					    nla_get_u8(tb[IFLA_PROTO_DOWN]));
		if (err)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_XDP]) {
		struct nlattr *xdp[IFLA_XDP_MAX + 1];
		u32 xdp_flags = 0;

		err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
				       ifla_xdp_policy, NULL);
		if (err < 0)
			goto errout;

		/* ATTACHED and PROG_ID are read-only, kernel-reported
		 * attributes; reject attempts to set them.
		 */
		if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
			err = -EINVAL;
			goto errout;
		}

		if (xdp[IFLA_XDP_FLAGS]) {
			xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
			if (xdp_flags & ~XDP_FLAGS_MASK) {
				err = -EINVAL;
				goto errout;
			}
			/* At most one attach-mode flag may be set. */
			if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
				err = -EINVAL;
				goto errout;
			}
		}

		if (xdp[IFLA_XDP_FD]) {
			err = dev_change_xdp_fd(dev, extack,
						nla_get_s32(xdp[IFLA_XDP_FD]),
						xdp_flags);
			if (err)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}

errout:
	if (status & DO_SETLINK_MODIFIED) {
		if (status & DO_SETLINK_NOTIFY)
			netdev_state_change(dev);

		if (err < 0)
			net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
					     dev->name);
	}

	return err;
}
2255
David Ahernc21ef3e2017-04-16 09:48:24 -07002256static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2257 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002259 struct net *net = sock_net(skb->sk);
Thomas Grafda5e0492006-08-10 21:17:37 -07002260 struct ifinfomsg *ifm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 struct net_device *dev;
Patrick McHardy0157f602007-06-13 12:03:36 -07002262 int err;
Thomas Grafda5e0492006-08-10 21:17:37 -07002263 struct nlattr *tb[IFLA_MAX+1];
2264 char ifname[IFNAMSIZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265
David Ahernc21ef3e2017-04-16 09:48:24 -07002266 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
2267 extack);
Thomas Grafda5e0492006-08-10 21:17:37 -07002268 if (err < 0)
2269 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270
Thomas Graf5176f912006-08-26 20:13:18 -07002271 if (tb[IFLA_IFNAME])
2272 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
Patrick McHardy78e5b8912006-09-13 20:35:36 -07002273 else
2274 ifname[0] = '\0';
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 err = -EINVAL;
Thomas Grafda5e0492006-08-10 21:17:37 -07002277 ifm = nlmsg_data(nlh);
Patrick McHardy51055be2007-06-05 12:40:01 -07002278 if (ifm->ifi_index > 0)
Eric Dumazeta3d12892009-10-21 10:59:31 +00002279 dev = __dev_get_by_index(net, ifm->ifi_index);
Thomas Grafda5e0492006-08-10 21:17:37 -07002280 else if (tb[IFLA_IFNAME])
Eric Dumazeta3d12892009-10-21 10:59:31 +00002281 dev = __dev_get_by_name(net, ifname);
Thomas Grafda5e0492006-08-10 21:17:37 -07002282 else
2283 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284
Thomas Grafda5e0492006-08-10 21:17:37 -07002285 if (dev == NULL) {
2286 err = -ENODEV;
2287 goto errout;
2288 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289
Eric Dumazete0d087a2009-11-07 01:26:17 -08002290 err = validate_linkmsg(dev, tb);
2291 if (err < 0)
Eric Dumazeta3d12892009-10-21 10:59:31 +00002292 goto errout;
Thomas Grafda5e0492006-08-10 21:17:37 -07002293
Jakub Kicinskiddf9f972017-04-30 21:46:46 -07002294 err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
Thomas Grafda5e0492006-08-10 21:17:37 -07002295errout:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 return err;
2297}
2298
WANG Cong66400d52015-03-24 11:53:31 -07002299static int rtnl_group_dellink(const struct net *net, int group)
2300{
2301 struct net_device *dev, *aux;
2302 LIST_HEAD(list_kill);
2303 bool found = false;
2304
2305 if (!group)
2306 return -EPERM;
2307
2308 for_each_netdev(net, dev) {
2309 if (dev->group == group) {
2310 const struct rtnl_link_ops *ops;
2311
2312 found = true;
2313 ops = dev->rtnl_link_ops;
2314 if (!ops || !ops->dellink)
2315 return -EOPNOTSUPP;
2316 }
2317 }
2318
2319 if (!found)
2320 return -ENODEV;
2321
2322 for_each_netdev_safe(net, dev, aux) {
2323 if (dev->group == group) {
2324 const struct rtnl_link_ops *ops;
2325
2326 ops = dev->rtnl_link_ops;
2327 ops->dellink(dev, &list_kill);
2328 }
2329 }
2330 unregister_netdevice_many(&list_kill);
2331
2332 return 0;
2333}
2334
Thomas Graf614732e2015-07-21 10:44:06 +02002335int rtnl_delete_link(struct net_device *dev)
2336{
2337 const struct rtnl_link_ops *ops;
2338 LIST_HEAD(list_kill);
2339
2340 ops = dev->rtnl_link_ops;
2341 if (!ops || !ops->dellink)
2342 return -EOPNOTSUPP;
2343
2344 ops->dellink(dev, &list_kill);
2345 unregister_netdevice_many(&list_kill);
2346
2347 return 0;
2348}
2349EXPORT_SYMBOL_GPL(rtnl_delete_link);
2350
David Ahernc21ef3e2017-04-16 09:48:24 -07002351static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
2352 struct netlink_ext_ack *extack)
Patrick McHardy38f7b872007-06-13 12:03:51 -07002353{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002354 struct net *net = sock_net(skb->sk);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002355 struct net_device *dev;
2356 struct ifinfomsg *ifm;
2357 char ifname[IFNAMSIZ];
2358 struct nlattr *tb[IFLA_MAX+1];
2359 int err;
2360
David Ahernc21ef3e2017-04-16 09:48:24 -07002361 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002362 if (err < 0)
2363 return err;
2364
2365 if (tb[IFLA_IFNAME])
2366 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2367
2368 ifm = nlmsg_data(nlh);
2369 if (ifm->ifi_index > 0)
Eric W. Biederman881d9662007-09-17 11:56:21 -07002370 dev = __dev_get_by_index(net, ifm->ifi_index);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002371 else if (tb[IFLA_IFNAME])
Eric W. Biederman881d9662007-09-17 11:56:21 -07002372 dev = __dev_get_by_name(net, ifname);
WANG Cong66400d52015-03-24 11:53:31 -07002373 else if (tb[IFLA_GROUP])
2374 return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP]));
Patrick McHardy38f7b872007-06-13 12:03:51 -07002375 else
2376 return -EINVAL;
2377
2378 if (!dev)
2379 return -ENODEV;
2380
Thomas Graf614732e2015-07-21 10:44:06 +02002381 return rtnl_delete_link(dev);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002382}
2383
Patrick McHardy3729d502010-02-26 06:34:54 +00002384int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2385{
2386 unsigned int old_flags;
2387 int err;
2388
2389 old_flags = dev->flags;
2390 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
2391 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
2392 if (err < 0)
2393 return err;
2394 }
2395
2396 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
Patrick McHardy3729d502010-02-26 06:34:54 +00002397
Nicolas Dichtela528c212013-09-25 12:02:44 +02002398 __dev_notify_flags(dev, old_flags, ~0U);
Patrick McHardy3729d502010-02-26 06:34:54 +00002399 return 0;
2400}
2401EXPORT_SYMBOL(rtnl_configure_link);
2402
Rami Rosenc0713562012-11-30 01:08:47 +00002403struct net_device *rtnl_create_link(struct net *net,
Thomas Graf78ebb0d2015-04-10 01:45:53 +02002404 const char *ifname, unsigned char name_assign_type,
Tom Gundersen55177502014-07-14 16:37:25 +02002405 const struct rtnl_link_ops *ops, struct nlattr *tb[])
Pavel Emelianove7199282007-08-08 22:16:38 -07002406{
Pavel Emelianove7199282007-08-08 22:16:38 -07002407 struct net_device *dev;
Jiri Pirkod40156a2012-07-20 02:28:47 +00002408 unsigned int num_tx_queues = 1;
2409 unsigned int num_rx_queues = 1;
Pavel Emelianove7199282007-08-08 22:16:38 -07002410
Jiri Pirko76ff5cc2012-07-20 02:28:48 +00002411 if (tb[IFLA_NUM_TX_QUEUES])
2412 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
2413 else if (ops->get_num_tx_queues)
Jiri Pirkod40156a2012-07-20 02:28:47 +00002414 num_tx_queues = ops->get_num_tx_queues();
Jiri Pirko76ff5cc2012-07-20 02:28:48 +00002415
2416 if (tb[IFLA_NUM_RX_QUEUES])
2417 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
2418 else if (ops->get_num_rx_queues)
Jiri Pirkod40156a2012-07-20 02:28:47 +00002419 num_rx_queues = ops->get_num_rx_queues();
stephen hemmingerefacb302012-04-10 18:34:43 +00002420
Tom Gundersen55177502014-07-14 16:37:25 +02002421 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
Tom Gundersenc835a672014-07-14 16:37:24 +02002422 ops->setup, num_tx_queues, num_rx_queues);
Pavel Emelianove7199282007-08-08 22:16:38 -07002423 if (!dev)
Tobias Klauserd1892e42017-02-20 16:32:06 +01002424 return ERR_PTR(-ENOMEM);
Pavel Emelianove7199282007-08-08 22:16:38 -07002425
Eric W. Biederman81adee42009-11-08 00:53:51 -08002426 dev_net_set(dev, net);
2427 dev->rtnl_link_ops = ops;
Patrick McHardy3729d502010-02-26 06:34:54 +00002428 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
Eric W. Biederman81adee42009-11-08 00:53:51 -08002429
Pavel Emelianove7199282007-08-08 22:16:38 -07002430 if (tb[IFLA_MTU])
2431 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
Jiri Pirko2afb9b52013-01-06 12:41:57 +00002432 if (tb[IFLA_ADDRESS]) {
Pavel Emelianove7199282007-08-08 22:16:38 -07002433 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
2434 nla_len(tb[IFLA_ADDRESS]));
Jiri Pirko2afb9b52013-01-06 12:41:57 +00002435 dev->addr_assign_type = NET_ADDR_SET;
2436 }
Pavel Emelianove7199282007-08-08 22:16:38 -07002437 if (tb[IFLA_BROADCAST])
2438 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
2439 nla_len(tb[IFLA_BROADCAST]));
2440 if (tb[IFLA_TXQLEN])
2441 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
2442 if (tb[IFLA_OPERSTATE])
David S. Miller93b2d4a2008-02-17 18:35:07 -08002443 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
Pavel Emelianove7199282007-08-08 22:16:38 -07002444 if (tb[IFLA_LINKMODE])
2445 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
Patrick McHardyffa934f2011-01-20 03:00:42 +00002446 if (tb[IFLA_GROUP])
2447 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
Pavel Emelianove7199282007-08-08 22:16:38 -07002448
2449 return dev;
Pavel Emelianove7199282007-08-08 22:16:38 -07002450}
Eric Dumazete0d087a2009-11-07 01:26:17 -08002451EXPORT_SYMBOL(rtnl_create_link);
Pavel Emelianove7199282007-08-08 22:16:38 -07002452
Eric W. Biederman90f62cf2014-04-23 14:29:27 -07002453static int rtnl_group_changelink(const struct sk_buff *skb,
2454 struct net *net, int group,
Vlad Dogarue7ed8282011-01-13 23:38:31 +00002455 struct ifinfomsg *ifm,
Jakub Kicinskiddf9f972017-04-30 21:46:46 -07002456 struct netlink_ext_ack *extack,
Vlad Dogarue7ed8282011-01-13 23:38:31 +00002457 struct nlattr **tb)
2458{
WANG Congd0795352015-03-23 16:31:09 -07002459 struct net_device *dev, *aux;
Vlad Dogarue7ed8282011-01-13 23:38:31 +00002460 int err;
2461
WANG Congd0795352015-03-23 16:31:09 -07002462 for_each_netdev_safe(net, dev, aux) {
Vlad Dogarue7ed8282011-01-13 23:38:31 +00002463 if (dev->group == group) {
Jakub Kicinskiddf9f972017-04-30 21:46:46 -07002464 err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
Vlad Dogarue7ed8282011-01-13 23:38:31 +00002465 if (err < 0)
2466 return err;
2467 }
2468 }
2469
2470 return 0;
2471}
2472
David Ahernc21ef3e2017-04-16 09:48:24 -07002473static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2474 struct netlink_ext_ack *extack)
Patrick McHardy38f7b872007-06-13 12:03:51 -07002475{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002476 struct net *net = sock_net(skb->sk);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002477 const struct rtnl_link_ops *ops;
Jiri Pirkoba7d49b2014-01-22 09:05:55 +01002478 const struct rtnl_link_ops *m_ops = NULL;
Patrick McHardy38f7b872007-06-13 12:03:51 -07002479 struct net_device *dev;
Jiri Pirkoba7d49b2014-01-22 09:05:55 +01002480 struct net_device *master_dev = NULL;
Patrick McHardy38f7b872007-06-13 12:03:51 -07002481 struct ifinfomsg *ifm;
2482 char kind[MODULE_NAME_LEN];
2483 char ifname[IFNAMSIZ];
2484 struct nlattr *tb[IFLA_MAX+1];
2485 struct nlattr *linkinfo[IFLA_INFO_MAX+1];
Tom Gundersen55177502014-07-14 16:37:25 +02002486 unsigned char name_assign_type = NET_NAME_USER;
Patrick McHardy38f7b872007-06-13 12:03:51 -07002487 int err;
2488
Johannes Berg95a5afc2008-10-16 15:24:51 -07002489#ifdef CONFIG_MODULES
Patrick McHardy38f7b872007-06-13 12:03:51 -07002490replay:
Thomas Graf8072f082007-07-31 14:13:50 -07002491#endif
David Ahernc21ef3e2017-04-16 09:48:24 -07002492 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002493 if (err < 0)
2494 return err;
2495
2496 if (tb[IFLA_IFNAME])
2497 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2498 else
2499 ifname[0] = '\0';
2500
2501 ifm = nlmsg_data(nlh);
2502 if (ifm->ifi_index > 0)
Eric W. Biederman881d9662007-09-17 11:56:21 -07002503 dev = __dev_get_by_index(net, ifm->ifi_index);
Vlad Dogarue7ed8282011-01-13 23:38:31 +00002504 else {
2505 if (ifname[0])
2506 dev = __dev_get_by_name(net, ifname);
Vlad Dogarue7ed8282011-01-13 23:38:31 +00002507 else
2508 dev = NULL;
2509 }
Patrick McHardy38f7b872007-06-13 12:03:51 -07002510
Jiri Pirkoba7d49b2014-01-22 09:05:55 +01002511 if (dev) {
2512 master_dev = netdev_master_upper_dev_get(dev);
2513 if (master_dev)
2514 m_ops = master_dev->rtnl_link_ops;
2515 }
2516
Eric Dumazete0d087a2009-11-07 01:26:17 -08002517 err = validate_linkmsg(dev, tb);
2518 if (err < 0)
Thomas Graf1840bb12008-02-23 19:54:36 -08002519 return err;
2520
Patrick McHardy38f7b872007-06-13 12:03:51 -07002521 if (tb[IFLA_LINKINFO]) {
2522 err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
Johannes Bergfceb6432017-04-12 14:34:07 +02002523 tb[IFLA_LINKINFO], ifla_info_policy,
2524 NULL);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002525 if (err < 0)
2526 return err;
2527 } else
2528 memset(linkinfo, 0, sizeof(linkinfo));
2529
2530 if (linkinfo[IFLA_INFO_KIND]) {
2531 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
2532 ops = rtnl_link_ops_get(kind);
2533 } else {
2534 kind[0] = '\0';
2535 ops = NULL;
2536 }
2537
2538 if (1) {
Sasha Levin4e10fd52015-02-24 14:14:35 -05002539 struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
2540 struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
Jiri Pirkoba7d49b2014-01-22 09:05:55 +01002541 struct nlattr **data = NULL;
2542 struct nlattr **slave_data = NULL;
Nicolas Dichtel317f4812015-01-15 15:11:18 +01002543 struct net *dest_net, *link_net = NULL;
Patrick McHardy38f7b872007-06-13 12:03:51 -07002544
2545 if (ops) {
2546 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
2547 err = nla_parse_nested(attr, ops->maxtype,
2548 linkinfo[IFLA_INFO_DATA],
Johannes Bergfceb6432017-04-12 14:34:07 +02002549 ops->policy, NULL);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002550 if (err < 0)
2551 return err;
2552 data = attr;
2553 }
2554 if (ops->validate) {
Matthias Schiffera8b8a8892017-06-25 23:56:01 +02002555 err = ops->validate(tb, data, extack);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002556 if (err < 0)
2557 return err;
2558 }
2559 }
2560
Jiri Pirkoba7d49b2014-01-22 09:05:55 +01002561 if (m_ops) {
2562 if (m_ops->slave_maxtype &&
2563 linkinfo[IFLA_INFO_SLAVE_DATA]) {
2564 err = nla_parse_nested(slave_attr,
2565 m_ops->slave_maxtype,
2566 linkinfo[IFLA_INFO_SLAVE_DATA],
Johannes Bergfceb6432017-04-12 14:34:07 +02002567 m_ops->slave_policy,
2568 NULL);
Jiri Pirkoba7d49b2014-01-22 09:05:55 +01002569 if (err < 0)
2570 return err;
2571 slave_data = slave_attr;
2572 }
2573 if (m_ops->slave_validate) {
Matthias Schifferd116ffc2017-06-25 23:56:03 +02002574 err = m_ops->slave_validate(tb, slave_data,
2575 extack);
Jiri Pirkoba7d49b2014-01-22 09:05:55 +01002576 if (err < 0)
2577 return err;
2578 }
2579 }
2580
Patrick McHardy38f7b872007-06-13 12:03:51 -07002581 if (dev) {
Nicolas Dichtel90c325e2014-09-01 16:07:28 +02002582 int status = 0;
Patrick McHardy38f7b872007-06-13 12:03:51 -07002583
2584 if (nlh->nlmsg_flags & NLM_F_EXCL)
2585 return -EEXIST;
2586 if (nlh->nlmsg_flags & NLM_F_REPLACE)
2587 return -EOPNOTSUPP;
2588
2589 if (linkinfo[IFLA_INFO_DATA]) {
2590 if (!ops || ops != dev->rtnl_link_ops ||
2591 !ops->changelink)
2592 return -EOPNOTSUPP;
2593
Matthias Schifferad744b22017-06-25 23:56:00 +02002594 err = ops->changelink(dev, tb, data, extack);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002595 if (err < 0)
2596 return err;
Nicolas Dichtelba998902014-09-01 16:07:29 +02002597 status |= DO_SETLINK_NOTIFY;
Patrick McHardy38f7b872007-06-13 12:03:51 -07002598 }
2599
Jiri Pirkoba7d49b2014-01-22 09:05:55 +01002600 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
2601 if (!m_ops || !m_ops->slave_changelink)
2602 return -EOPNOTSUPP;
2603
2604 err = m_ops->slave_changelink(master_dev, dev,
Matthias Schiffer17dd0ec2017-06-25 23:56:02 +02002605 tb, slave_data,
2606 extack);
Jiri Pirkoba7d49b2014-01-22 09:05:55 +01002607 if (err < 0)
2608 return err;
Nicolas Dichtelba998902014-09-01 16:07:29 +02002609 status |= DO_SETLINK_NOTIFY;
Jiri Pirkoba7d49b2014-01-22 09:05:55 +01002610 }
2611
Jakub Kicinskiddf9f972017-04-30 21:46:46 -07002612 return do_setlink(skb, dev, ifm, extack, tb, ifname,
2613 status);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002614 }
2615
Patrick McHardyffa934f2011-01-20 03:00:42 +00002616 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2617 if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
Eric W. Biederman90f62cf2014-04-23 14:29:27 -07002618 return rtnl_group_changelink(skb, net,
Patrick McHardyffa934f2011-01-20 03:00:42 +00002619 nla_get_u32(tb[IFLA_GROUP]),
Jakub Kicinskiddf9f972017-04-30 21:46:46 -07002620 ifm, extack, tb);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002621 return -ENODEV;
Patrick McHardyffa934f2011-01-20 03:00:42 +00002622 }
Patrick McHardy38f7b872007-06-13 12:03:51 -07002623
Theuns Verwoerd160ca012017-01-31 12:23:46 +13002624 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
Patrick McHardy38f7b872007-06-13 12:03:51 -07002625 return -EOPNOTSUPP;
2626
2627 if (!ops) {
Johannes Berg95a5afc2008-10-16 15:24:51 -07002628#ifdef CONFIG_MODULES
Patrick McHardy38f7b872007-06-13 12:03:51 -07002629 if (kind[0]) {
2630 __rtnl_unlock();
2631 request_module("rtnl-link-%s", kind);
2632 rtnl_lock();
2633 ops = rtnl_link_ops_get(kind);
2634 if (ops)
2635 goto replay;
2636 }
2637#endif
2638 return -EOPNOTSUPP;
2639 }
2640
Jiri Pirkob0ab2fa2014-06-26 09:58:25 +02002641 if (!ops->setup)
2642 return -EOPNOTSUPP;
2643
Tom Gundersen55177502014-07-14 16:37:25 +02002644 if (!ifname[0]) {
Patrick McHardy38f7b872007-06-13 12:03:51 -07002645 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
Tom Gundersen55177502014-07-14 16:37:25 +02002646 name_assign_type = NET_NAME_ENUM;
2647 }
Patrick McHardy38f7b872007-06-13 12:03:51 -07002648
Eric W. Biederman81adee42009-11-08 00:53:51 -08002649 dest_net = rtnl_link_get_net(net, tb);
Eric W. Biederman13ad1772011-01-29 14:57:22 +00002650 if (IS_ERR(dest_net))
2651 return PTR_ERR(dest_net);
2652
Eric W. Biederman505ce412015-02-26 16:19:00 -06002653 err = -EPERM;
2654 if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
2655 goto out;
2656
Nicolas Dichtel317f4812015-01-15 15:11:18 +01002657 if (tb[IFLA_LINK_NETNSID]) {
2658 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
2659
2660 link_net = get_net_ns_by_id(dest_net, id);
2661 if (!link_net) {
2662 err = -EINVAL;
2663 goto out;
2664 }
Eric W. Biederman06615be2015-02-26 16:20:07 -06002665 err = -EPERM;
2666 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
2667 goto out;
Nicolas Dichtel317f4812015-01-15 15:11:18 +01002668 }
2669
2670 dev = rtnl_create_link(link_net ? : dest_net, ifname,
2671 name_assign_type, ops, tb);
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00002672 if (IS_ERR(dev)) {
Pavel Emelianove7199282007-08-08 22:16:38 -07002673 err = PTR_ERR(dev);
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00002674 goto out;
2675 }
2676
2677 dev->ifindex = ifm->ifi_index;
2678
Cong Wang0e0eee22014-02-11 15:51:30 -08002679 if (ops->newlink) {
Matthias Schiffer7a3f4a12017-06-25 23:55:59 +02002680 err = ops->newlink(link_net ? : net, dev, tb, data,
2681 extack);
Cong Wang0e0eee22014-02-11 15:51:30 -08002682 /* Drivers should call free_netdev() in ->destructor
Cong Wange51fb152014-06-03 16:40:47 -07002683 * and unregister it on failure after registration
2684 * so that device could be finally freed in rtnl_unlock.
Cong Wang0e0eee22014-02-11 15:51:30 -08002685 */
Cong Wange51fb152014-06-03 16:40:47 -07002686 if (err < 0) {
2687 /* If device is not registered at all, free it now */
2688 if (dev->reg_state == NETREG_UNINITIALIZED)
2689 free_netdev(dev);
Cong Wang0e0eee22014-02-11 15:51:30 -08002690 goto out;
Cong Wange51fb152014-06-03 16:40:47 -07002691 }
Cong Wang0e0eee22014-02-11 15:51:30 -08002692 } else {
Patrick McHardy2d85cba2007-07-11 19:42:13 -07002693 err = register_netdevice(dev);
Cong Wang0e0eee22014-02-11 15:51:30 -08002694 if (err < 0) {
2695 free_netdev(dev);
2696 goto out;
2697 }
Dan Carpenterfce9b9b2013-08-14 12:35:42 +03002698 }
Patrick McHardy3729d502010-02-26 06:34:54 +00002699 err = rtnl_configure_link(dev, ifm);
David S. Miller43638902015-03-10 21:58:32 -04002700 if (err < 0)
2701 goto out_unregister;
Nicolas Dichtelbdef2792015-01-20 15:15:42 +01002702 if (link_net) {
Nicolas Dichtel317f4812015-01-15 15:11:18 +01002703 err = dev_change_net_namespace(dev, dest_net, ifname);
Nicolas Dichtelbdef2792015-01-20 15:15:42 +01002704 if (err < 0)
David S. Miller43638902015-03-10 21:58:32 -04002705 goto out_unregister;
Nicolas Dichtelbdef2792015-01-20 15:15:42 +01002706 }
Theuns Verwoerd160ca012017-01-31 12:23:46 +13002707 if (tb[IFLA_MASTER]) {
2708 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
2709 if (err)
2710 goto out_unregister;
2711 }
Patrick McHardy3729d502010-02-26 06:34:54 +00002712out:
Nicolas Dichtel317f4812015-01-15 15:11:18 +01002713 if (link_net)
2714 put_net(link_net);
Eric W. Biederman81adee42009-11-08 00:53:51 -08002715 put_net(dest_net);
Patrick McHardy38f7b872007-06-13 12:03:51 -07002716 return err;
David S. Miller43638902015-03-10 21:58:32 -04002717out_unregister:
2718 if (ops->newlink) {
2719 LIST_HEAD(list_kill);
2720
2721 ops->dellink(dev, &list_kill);
2722 unregister_netdevice_many(&list_kill);
2723 } else {
2724 unregister_netdevice(dev);
2725 }
2726 goto out;
Patrick McHardy38f7b872007-06-13 12:03:51 -07002727 }
2728}
2729
David Ahernc21ef3e2017-04-16 09:48:24 -07002730static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2731 struct netlink_ext_ack *extack)
Jean Tourrilhes711e2c32006-02-22 15:10:56 -08002732{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002733 struct net *net = sock_net(skb->sk);
Thomas Grafb60c5112006-08-04 23:05:34 -07002734 struct ifinfomsg *ifm;
Eric Dumazeta3d12892009-10-21 10:59:31 +00002735 char ifname[IFNAMSIZ];
Thomas Grafb60c5112006-08-04 23:05:34 -07002736 struct nlattr *tb[IFLA_MAX+1];
2737 struct net_device *dev = NULL;
2738 struct sk_buff *nskb;
Thomas Graf339bf982006-11-10 14:10:15 -08002739 int err;
Greg Rose115c9b82012-02-21 16:54:48 -05002740 u32 ext_filter_mask = 0;
Jean Tourrilhes711e2c32006-02-22 15:10:56 -08002741
David Ahernc21ef3e2017-04-16 09:48:24 -07002742 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
Thomas Grafb60c5112006-08-04 23:05:34 -07002743 if (err < 0)
Eric Sesterhenn9918f232006-09-26 23:26:38 -07002744 return err;
Thomas Grafb60c5112006-08-04 23:05:34 -07002745
Eric Dumazeta3d12892009-10-21 10:59:31 +00002746 if (tb[IFLA_IFNAME])
2747 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2748
Greg Rose115c9b82012-02-21 16:54:48 -05002749 if (tb[IFLA_EXT_MASK])
2750 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
2751
Thomas Grafb60c5112006-08-04 23:05:34 -07002752 ifm = nlmsg_data(nlh);
Eric Dumazeta3d12892009-10-21 10:59:31 +00002753 if (ifm->ifi_index > 0)
2754 dev = __dev_get_by_index(net, ifm->ifi_index);
2755 else if (tb[IFLA_IFNAME])
2756 dev = __dev_get_by_name(net, ifname);
2757 else
Jean Tourrilhes711e2c32006-02-22 15:10:56 -08002758 return -EINVAL;
Thomas Grafb60c5112006-08-04 23:05:34 -07002759
Eric Dumazeta3d12892009-10-21 10:59:31 +00002760 if (dev == NULL)
2761 return -ENODEV;
2762
Greg Rose115c9b82012-02-21 16:54:48 -05002763 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
Eric Dumazeta3d12892009-10-21 10:59:31 +00002764 if (nskb == NULL)
2765 return -ENOBUFS;
Jean Tourrilhes711e2c32006-02-22 15:10:56 -08002766
Eric W. Biederman15e47302012-09-07 20:12:54 +00002767 err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
Vlad Yasevich3d3ea5a2017-05-27 10:14:34 -04002768 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 0);
Patrick McHardy26932562007-01-31 23:16:40 -08002769 if (err < 0) {
2770 /* -EMSGSIZE implies BUG in if_nlmsg_size */
2771 WARN_ON(err == -EMSGSIZE);
2772 kfree_skb(nskb);
Eric Dumazeta3d12892009-10-21 10:59:31 +00002773 } else
Eric W. Biederman15e47302012-09-07 20:12:54 +00002774 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
Thomas Grafb60c5112006-08-04 23:05:34 -07002775
Jean Tourrilhes711e2c32006-02-22 15:10:56 -08002776 return err;
Jean Tourrilhes711e2c32006-02-22 15:10:56 -08002777}
Jean Tourrilhes711e2c32006-02-22 15:10:56 -08002778
Greg Rose115c9b82012-02-21 16:54:48 -05002779static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
Greg Rosec7ac8672011-06-10 01:27:09 +00002780{
Greg Rose115c9b82012-02-21 16:54:48 -05002781 struct net *net = sock_net(skb->sk);
2782 struct net_device *dev;
2783 struct nlattr *tb[IFLA_MAX+1];
2784 u32 ext_filter_mask = 0;
2785 u16 min_ifinfo_dump_size = 0;
Michal Schmidte5eca6d2014-05-28 14:15:19 +02002786 int hdrlen;
Greg Rose115c9b82012-02-21 16:54:48 -05002787
Michal Schmidte5eca6d2014-05-28 14:15:19 +02002788 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
2789 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2790 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2791
Johannes Bergfceb6432017-04-12 14:34:07 +02002792 if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
Eric Dumazeta4b64fb2012-03-04 12:32:10 +00002793 if (tb[IFLA_EXT_MASK])
2794 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
2795 }
Greg Rose115c9b82012-02-21 16:54:48 -05002796
2797 if (!ext_filter_mask)
2798 return NLMSG_GOODSIZE;
2799 /*
2800 * traverse the list of net devices and compute the minimum
2801 * buffer size based upon the filter mask.
2802 */
Florian Westphal6853dd42017-08-09 20:41:51 +02002803 rcu_read_lock();
2804 for_each_netdev_rcu(net, dev) {
Greg Rose115c9b82012-02-21 16:54:48 -05002805 min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
2806 if_nlmsg_size(dev,
2807 ext_filter_mask));
2808 }
Florian Westphal6853dd42017-08-09 20:41:51 +02002809 rcu_read_unlock();
Greg Rose115c9b82012-02-21 16:54:48 -05002810
Zhang Shengju93af2052016-11-22 14:14:28 +08002811 return nlmsg_total_size(min_ifinfo_dump_size);
Greg Rosec7ac8672011-06-10 01:27:09 +00002812}
2813
Adrian Bunk42bad1d2007-04-26 00:57:41 -07002814static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815{
2816 int idx;
2817 int s_idx = cb->family;
2818
2819 if (s_idx == 0)
2820 s_idx = 1;
Florian Westphal6853dd42017-08-09 20:41:51 +02002821
Patrick McHardy25239ce2010-04-26 16:02:05 +02002822 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 int type = cb->nlh->nlmsg_type-RTM_BASE;
Florian Westphal6853dd42017-08-09 20:41:51 +02002824 struct rtnl_link *handlers;
2825 rtnl_dumpit_func dumpit;
2826
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827 if (idx < s_idx || idx == PF_PACKET)
2828 continue;
Florian Westphal6853dd42017-08-09 20:41:51 +02002829
2830 handlers = rtnl_dereference(rtnl_msg_handlers[idx]);
2831 if (!handlers)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 continue;
Florian Westphal6853dd42017-08-09 20:41:51 +02002833
2834 dumpit = READ_ONCE(handlers[type].dumpit);
2835 if (!dumpit)
2836 continue;
2837
Nicolas Dichtel04652772013-03-22 06:28:42 +00002838 if (idx > s_idx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 memset(&cb->args[0], 0, sizeof(cb->args));
Nicolas Dichtel04652772013-03-22 06:28:42 +00002840 cb->prev_seq = 0;
2841 cb->seq = 0;
2842 }
Florian Westphal6853dd42017-08-09 20:41:51 +02002843 if (dumpit(skb, cb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 break;
2845 }
2846 cb->family = idx;
2847
2848 return skb->len;
2849}
2850
Mahesh Bandewar395eea62014-12-03 13:46:24 -08002851struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
Vlad Yasevich3d3ea5a2017-05-27 10:14:34 -04002852 unsigned int change,
2853 u32 event, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854{
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002855 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856 struct sk_buff *skb;
Thomas Graf0ec6d3f2006-08-15 00:37:09 -07002857 int err = -ENOBUFS;
Greg Rosec7ac8672011-06-10 01:27:09 +00002858 size_t if_info_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859
Alexei Starovoitov7f294052013-10-23 16:02:42 -07002860 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
Thomas Graf0ec6d3f2006-08-15 00:37:09 -07002861 if (skb == NULL)
2862 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863
Vlad Yasevich3d3ea5a2017-05-27 10:14:34 -04002864 err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0, event);
Patrick McHardy26932562007-01-31 23:16:40 -08002865 if (err < 0) {
2866 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
2867 WARN_ON(err == -EMSGSIZE);
2868 kfree_skb(skb);
2869 goto errout;
2870 }
Mahesh Bandewar395eea62014-12-03 13:46:24 -08002871 return skb;
Thomas Graf0ec6d3f2006-08-15 00:37:09 -07002872errout:
2873 if (err < 0)
Eric W. Biederman4b3da702007-11-19 22:27:40 -08002874 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
Mahesh Bandewar395eea62014-12-03 13:46:24 -08002875 return NULL;
2876}
2877
2878void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
2879{
2880 struct net *net = dev_net(dev);
2881
2882 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
2883}
2884
Vlad Yasevich3d3ea5a2017-05-27 10:14:34 -04002885static void rtmsg_ifinfo_event(int type, struct net_device *dev,
2886 unsigned int change, u32 event,
2887 gfp_t flags)
Mahesh Bandewar395eea62014-12-03 13:46:24 -08002888{
2889 struct sk_buff *skb;
2890
Nicolas Dichteled2a80a2015-05-13 14:19:42 +02002891 if (dev->reg_state != NETREG_REGISTERED)
2892 return;
2893
Vlad Yasevich3d3ea5a2017-05-27 10:14:34 -04002894 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags);
Mahesh Bandewar395eea62014-12-03 13:46:24 -08002895 if (skb)
2896 rtmsg_ifinfo_send(skb, dev, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897}
Vlad Yasevich3d3ea5a2017-05-27 10:14:34 -04002898
/* Public entry point for link notifications: forwards to
 * rtmsg_ifinfo_event() with the default event from rtnl_get_event(0)
 * (helper defined elsewhere in this file).
 */
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
		  gfp_t flags)
{
	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags);
}
EXPORT_SYMBOL(rtmsg_ifinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905
John Fastabendd83b0602012-04-15 06:44:08 +00002906static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
2907 struct net_device *dev,
Hubert Sokolowski1e53d5b2015-04-09 12:16:17 +00002908 u8 *addr, u16 vid, u32 pid, u32 seq,
Nicolas Dichtel1c104a62014-03-19 17:47:49 +01002909 int type, unsigned int flags,
Hubert Sokolowskib3379042015-12-15 13:20:30 +00002910 int nlflags, u16 ndm_state)
John Fastabendd83b0602012-04-15 06:44:08 +00002911{
2912 struct nlmsghdr *nlh;
2913 struct ndmsg *ndm;
2914
Nicolas Dichtel1c104a62014-03-19 17:47:49 +01002915 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
John Fastabendd83b0602012-04-15 06:44:08 +00002916 if (!nlh)
2917 return -EMSGSIZE;
2918
2919 ndm = nlmsg_data(nlh);
2920 ndm->ndm_family = AF_BRIDGE;
2921 ndm->ndm_pad1 = 0;
2922 ndm->ndm_pad2 = 0;
2923 ndm->ndm_flags = flags;
2924 ndm->ndm_type = 0;
2925 ndm->ndm_ifindex = dev->ifindex;
Hubert Sokolowskib3379042015-12-15 13:20:30 +00002926 ndm->ndm_state = ndm_state;
John Fastabendd83b0602012-04-15 06:44:08 +00002927
2928 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
2929 goto nla_put_failure;
Hubert Sokolowski1e53d5b2015-04-09 12:16:17 +00002930 if (vid)
2931 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
2932 goto nla_put_failure;
John Fastabendd83b0602012-04-15 06:44:08 +00002933
Johannes Berg053c0952015-01-16 22:09:00 +01002934 nlmsg_end(skb, nlh);
2935 return 0;
John Fastabendd83b0602012-04-15 06:44:08 +00002936
2937nla_put_failure:
2938 nlmsg_cancel(skb, nlh);
2939 return -EMSGSIZE;
2940}
2941
John Fastabend3ff661c2012-04-15 06:44:14 +00002942static inline size_t rtnl_fdb_nlmsg_size(void)
2943{
Sabrina Dubrocaf82ef3e2016-11-18 15:50:39 +01002944 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
2945 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
2946 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
2947 0;
John Fastabend3ff661c2012-04-15 06:44:14 +00002948}
2949
Hubert Sokolowskib3379042015-12-15 13:20:30 +00002950static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
2951 u16 ndm_state)
John Fastabend3ff661c2012-04-15 06:44:14 +00002952{
2953 struct net *net = dev_net(dev);
2954 struct sk_buff *skb;
2955 int err = -ENOBUFS;
2956
2957 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
2958 if (!skb)
2959 goto errout;
2960
Hubert Sokolowski1e53d5b2015-04-09 12:16:17 +00002961 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
Hubert Sokolowskib3379042015-12-15 13:20:30 +00002962 0, 0, type, NTF_SELF, 0, ndm_state);
John Fastabend3ff661c2012-04-15 06:44:14 +00002963 if (err < 0) {
2964 kfree_skb(skb);
2965 goto errout;
2966 }
2967
2968 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2969 return;
2970errout:
2971 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2972}
2973
/**
 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
 * @ndm: neighbour message header; only static (NUD_PERMANENT) entries
 *	 are accepted by this fallback
 * @tb: parsed NDA_* attributes (unused by the default implementation)
 * @dev: device to install the address on
 * @addr: MAC address to add
 * @vid: VLAN id; must be 0 — VLANs are not supported by dev_uc/mc_add()
 * @flags: NLM_F_* request flags; NLM_F_EXCL suppresses the -EEXIST
 *	   duplicate-entry error
 *
 * Used when a driver provides no ndo_fdb_add: installs @addr in the
 * device's unicast or multicast filter list.  Returns 0 or a negative
 * errno.
 */
int ndo_dflt_fdb_add(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid,
		     u16 flags)
{
	int err = -EINVAL;

	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return err;
	}

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return err;
	}

	/* Link-local addresses go through the unicast list as well. */
	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_add);
3010
Jiri Pirkof6f64242014-11-28 14:34:15 +01003011static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid)
3012{
3013 u16 vid = 0;
3014
3015 if (vlan_attr) {
3016 if (nla_len(vlan_attr) != sizeof(u16)) {
3017 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan\n");
3018 return -EINVAL;
3019 }
3020
3021 vid = nla_get_u16(vlan_attr);
3022
3023 if (!vid || vid >= VLAN_VID_MASK) {
3024 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan id %d\n",
3025 vid);
3026 return -EINVAL;
3027 }
3028 }
3029 *p_vid = vid;
3030 return 0;
3031}
3032
David Ahernc21ef3e2017-04-16 09:48:24 -07003033static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
3034 struct netlink_ext_ack *extack)
John Fastabend77162022012-04-15 06:43:56 +00003035{
3036 struct net *net = sock_net(skb->sk);
John Fastabend77162022012-04-15 06:43:56 +00003037 struct ndmsg *ndm;
3038 struct nlattr *tb[NDA_MAX+1];
3039 struct net_device *dev;
3040 u8 *addr;
Jiri Pirkof6f64242014-11-28 14:34:15 +01003041 u16 vid;
John Fastabend77162022012-04-15 06:43:56 +00003042 int err;
3043
David Ahernc21ef3e2017-04-16 09:48:24 -07003044 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
John Fastabend77162022012-04-15 06:43:56 +00003045 if (err < 0)
3046 return err;
3047
3048 ndm = nlmsg_data(nlh);
3049 if (ndm->ndm_ifindex == 0) {
3050 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
3051 return -EINVAL;
3052 }
3053
3054 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3055 if (dev == NULL) {
3056 pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
3057 return -ENODEV;
3058 }
3059
3060 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3061 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
3062 return -EINVAL;
3063 }
3064
3065 addr = nla_data(tb[NDA_LLADDR]);
John Fastabend77162022012-04-15 06:43:56 +00003066
Jiri Pirkof6f64242014-11-28 14:34:15 +01003067 err = fdb_vid_parse(tb[NDA_VLAN], &vid);
3068 if (err)
3069 return err;
3070
John Fastabend77162022012-04-15 06:43:56 +00003071 err = -EOPNOTSUPP;
3072
3073 /* Support fdb on master device the net/bridge default case */
3074 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3075 (dev->priv_flags & IFF_BRIDGE_PORT)) {
Jiri Pirko898e5062013-01-03 22:48:52 +00003076 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3077 const struct net_device_ops *ops = br_dev->netdev_ops;
3078
Jiri Pirkof6f64242014-11-28 14:34:15 +01003079 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
3080 nlh->nlmsg_flags);
John Fastabend77162022012-04-15 06:43:56 +00003081 if (err)
3082 goto out;
3083 else
3084 ndm->ndm_flags &= ~NTF_MASTER;
3085 }
3086
3087 /* Embedded bridge, macvlan, and any other device support */
Vlad Yasevich090096b2013-03-06 15:39:42 +00003088 if ((ndm->ndm_flags & NTF_SELF)) {
3089 if (dev->netdev_ops->ndo_fdb_add)
3090 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
Jiri Pirkof6f64242014-11-28 14:34:15 +01003091 vid,
Vlad Yasevich090096b2013-03-06 15:39:42 +00003092 nlh->nlmsg_flags);
3093 else
Jiri Pirkof6f64242014-11-28 14:34:15 +01003094 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
Vlad Yasevich090096b2013-03-06 15:39:42 +00003095 nlh->nlmsg_flags);
John Fastabend77162022012-04-15 06:43:56 +00003096
John Fastabend3ff661c2012-04-15 06:44:14 +00003097 if (!err) {
Hubert Sokolowskib3379042015-12-15 13:20:30 +00003098 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
3099 ndm->ndm_state);
John Fastabend77162022012-04-15 06:43:56 +00003100 ndm->ndm_flags &= ~NTF_SELF;
John Fastabend3ff661c2012-04-15 06:44:14 +00003101 }
John Fastabend77162022012-04-15 06:43:56 +00003102 }
3103out:
3104 return err;
3105}
3106
/**
 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
 * @ndm: neighbour message header; only static (NUD_PERMANENT) entries
 *	 can be removed by this fallback
 * @tb: parsed NDA_* attributes (unused by the default implementation)
 * @dev: device to remove the address from
 * @addr: MAC address to delete
 * @vid: VLAN id (ignored by the default implementation)
 *
 * Used when a driver provides no ndo_fdb_del: removes @addr from the
 * device's unicast or multicast filter list.  Returns 0 or a negative
 * errno.
 */
int ndo_dflt_fdb_del(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid)
{
	int err = -EINVAL;

	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
	if (!(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return err;
	}

	/* Link-local addresses live in the unicast list as well. */
	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_del);
3133
/* RTM_DELNEIGH (AF_BRIDGE) handler: delete an FDB entry on the master
 * device (NTF_MASTER, the net/bridge default) and/or the device itself
 * (NTF_SELF).  Requires CAP_NET_ADMIN.  Successfully handled scopes are
 * cleared from ndm_flags; a self-delete is notified to RTNLGRP_NEIGH.
 *
 * Returns 0 on success or a negative errno.
 */
static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct net_device *dev;
	int err = -EINVAL;
	__u8 *addr;
	u16 vid;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
	if (err < 0)
		return err;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
		return -ENODEV;
	}

	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
		return -EINVAL;
	}

	addr = nla_data(tb[NDA_LLADDR]);

	err = fdb_vid_parse(tb[NDA_VLAN], &vid);
	if (err)
		return err;

	err = -EOPNOTSUPP;

	/* Support fdb on master device the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
		const struct net_device_ops *ops = br_dev->netdev_ops;

		if (ops->ndo_fdb_del)
			err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);

		/* A failure here (including a missing op) aborts before
		 * the NTF_SELF branch is tried.
		 */
		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
	if (ndm->ndm_flags & NTF_SELF) {
		if (dev->netdev_ops->ndo_fdb_del)
			err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
							   vid);
		else
			err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);

		if (!err) {
			rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
					ndm->ndm_state);
			ndm->ndm_flags &= ~NTF_SELF;
		}
	}
out:
	return err;
}
3209
John Fastabendd83b0602012-04-15 06:44:08 +00003210static int nlmsg_populate_fdb(struct sk_buff *skb,
3211 struct netlink_callback *cb,
3212 struct net_device *dev,
3213 int *idx,
3214 struct netdev_hw_addr_list *list)
3215{
3216 struct netdev_hw_addr *ha;
3217 int err;
Eric W. Biederman15e47302012-09-07 20:12:54 +00003218 u32 portid, seq;
John Fastabendd83b0602012-04-15 06:44:08 +00003219
Eric W. Biederman15e47302012-09-07 20:12:54 +00003220 portid = NETLINK_CB(cb->skb).portid;
John Fastabendd83b0602012-04-15 06:44:08 +00003221 seq = cb->nlh->nlmsg_seq;
3222
3223 list_for_each_entry(ha, &list->list, list) {
Roopa Prabhud2976532016-08-30 21:56:45 -07003224 if (*idx < cb->args[2])
John Fastabendd83b0602012-04-15 06:44:08 +00003225 goto skip;
3226
Hubert Sokolowski1e53d5b2015-04-09 12:16:17 +00003227 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
John Fastabenda7a558f2012-11-01 16:23:10 +00003228 portid, seq,
Nicolas Dichtel1c104a62014-03-19 17:47:49 +01003229 RTM_NEWNEIGH, NTF_SELF,
Hubert Sokolowskib3379042015-12-15 13:20:30 +00003230 NLM_F_MULTI, NUD_PERMANENT);
John Fastabendd83b0602012-04-15 06:44:08 +00003231 if (err < 0)
3232 return err;
3233skip:
3234 *idx += 1;
3235 }
3236 return 0;
3237}
3238
/**
 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
 * @skb: socket buffer the messages are placed in
 * @cb: netlink dump callback; cb->args[2] is the per-device resume offset
 * @dev: netdevice whose unicast and multicast lists are dumped
 * @filter_dev: unused by the default implementation
 * @idx: running entry counter, advanced for every address visited
 *
 * Default netdevice operation to dump the existing unicast address list.
 * Returns 0 on success or a negative fill error; the number of entries
 * visited is reported through *@idx.
 */
int ndo_dflt_fdb_dump(struct sk_buff *skb,
		      struct netlink_callback *cb,
		      struct net_device *dev,
		      struct net_device *filter_dev,
		      int *idx)
{
	int err;

	/* The uc/mc lists are walked under the device's address lock. */
	netif_addr_lock_bh(dev);
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
	if (err)
		goto out;
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
out:
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_dump);
3265
/* RTM_GETNEIGH (AF_BRIDGE) dump handler: walk every netdevice and dump
 * its FDB, optionally filtered by a bridge port (ifi_index) and/or a
 * bridge master (IFLA_MASTER).
 *
 * Resume state across dump passes lives in cb->args:
 *   args[0] = device hash bucket, args[1] = device index in bucket,
 *   args[2] = FDB entry offset within the current device.
 */
static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *br_dev = NULL;
	const struct net_device_ops *ops = NULL;
	const struct net_device_ops *cops = NULL;
	struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct hlist_head *head;
	int brport_idx = 0;
	int br_idx = 0;
	int h, s_h;
	int idx = 0, s_idx;
	int err = 0;
	int fidx = 0;

	err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
			  IFLA_MAX, ifla_policy, NULL);
	if (err < 0) {
		return -EINVAL;
	} else if (err == 0) {
		if (tb[IFLA_MASTER])
			br_idx = nla_get_u32(tb[IFLA_MASTER]);
	}

	brport_idx = ifm->ifi_index;

	if (br_idx) {
		br_dev = __dev_get_by_index(net, br_idx);
		if (!br_dev)
			return -ENODEV;

		ops = br_dev->netdev_ops;
	}

	s_h = cb->args[0];
	s_idx = cb->args[1];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {

			if (brport_idx && (dev->ifindex != brport_idx))
				continue;

			if (!br_idx) { /* user did not specify a specific bridge */
				if (dev->priv_flags & IFF_BRIDGE_PORT) {
					br_dev = netdev_master_upper_dev_get(dev);
					cops = br_dev->netdev_ops;
				}
			} else {
				/* Filtering by bridge: only the bridge
				 * itself and its ports qualify.
				 */
				if (dev != br_dev &&
				    !(dev->priv_flags & IFF_BRIDGE_PORT))
					continue;

				if (br_dev != netdev_master_upper_dev_get(dev) &&
				    !(dev->priv_flags & IFF_EBRIDGE))
					continue;
				cops = ops;
			}

			if (idx < s_idx)
				goto cont;

			/* Bridge ports: let the master dump the entries
			 * it holds for this port first.
			 */
			if (dev->priv_flags & IFF_BRIDGE_PORT) {
				if (cops && cops->ndo_fdb_dump) {
					err = cops->ndo_fdb_dump(skb, cb,
								br_dev, dev,
								&fidx);
					if (err == -EMSGSIZE)
						goto out;
				}
			}

			if (dev->netdev_ops->ndo_fdb_dump)
				err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
								    dev, NULL,
								    &fidx);
			else
				err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
							&fidx);
			/* -EMSGSIZE means skb is full: stop and resume in
			 * the next pass from the saved cb->args state.
			 */
			if (err == -EMSGSIZE)
				goto out;

			cops = NULL;

			/* reset fdb offset to 0 for rest of the interfaces */
			cb->args[2] = 0;
			fidx = 0;
cont:
			idx++;
		}
	}

out:
	cb->args[0] = h;
	cb->args[1] = idx;
	cb->args[2] = fidx;

	return skb->len;
}
3369
Scott Feldman2c3c0312014-11-28 14:34:25 +01003370static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
3371 unsigned int attrnum, unsigned int flag)
3372{
3373 if (mask & flag)
3374 return nla_put_u8(skb, attrnum, !!(flags & flag));
3375 return 0;
3376}
3377
/* Default RTM_NEWLINK (AF_BRIDGE) fill routine used by drivers to answer
 * a bridge getlink request for @dev: basic link attributes, an
 * IFLA_AF_SPEC nest (bridge flags/mode plus optional VLAN data supplied
 * by @vlan_fill) and an IFLA_PROTINFO nest with the masked bridge port
 * flags.
 *
 * Returns 0 on success, -EMSGSIZE when @skb runs out of room, or the
 * error propagated from @vlan_fill.
 */
int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			    struct net_device *dev, u16 mode,
			    u32 flags, u32 mask, int nlflags,
			    u32 filter_mask,
			    int (*vlan_fill)(struct sk_buff *skb,
					     struct net_device *dev,
					     u32 filter_mask))
{
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	struct nlattr *br_afspec;
	struct nlattr *protinfo;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	int err = 0;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_BRIDGE;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = 0;


	/* IFLA_MASTER, IFLA_ADDRESS and IFLA_LINK are conditional:
	 * only emitted when the device has a master / an address / a
	 * distinct lower link.
	 */
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (br_dev &&
	     nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
	if (!br_afspec)
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
		nla_nest_cancel(skb, br_afspec);
		goto nla_put_failure;
	}

	if (mode != BRIDGE_MODE_UNDEF) {
		if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	if (vlan_fill) {
		err = vlan_fill(skb, dev, filter_mask);
		if (err) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, br_afspec);

	protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
	if (!protinfo)
		goto nla_put_failure;

	/* Each bridge-port flag is emitted only when selected by @mask;
	 * see brport_nla_put_flag().
	 */
	if (brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_FAST_LEAVE,
				BR_MULTICAST_FAST_LEAVE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING, BR_LEARNING) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
		nla_nest_cancel(skb, protinfo);
		goto nla_put_failure;
	}

	nla_nest_end(skb, protinfo);

	nlmsg_end(skb, nlh);
	return 0;
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	/* err is only non-zero when vlan_fill failed. */
	return err ? err : -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
John Fastabend815cccb2012-10-24 08:13:09 +00003476
/* RTM_GETLINK (AF_BRIDGE) dump handler: for every netdevice, emit the
 * bridge view of its master (if the master implements
 * ndo_bridge_getlink) and then its own, honouring an optional
 * IFLA_EXT_MASK filter.  cb->args[0] holds the resume index.
 */
static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int idx = 0;
	u32 portid = NETLINK_CB(cb->skb).portid;
	u32 seq = cb->nlh->nlmsg_seq;
	u32 filter_mask = 0;
	int err;

	if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
		struct nlattr *extfilt;

		extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
					  IFLA_EXT_MASK);
		if (extfilt) {
			if (nla_len(extfilt) < sizeof(filter_mask))
				return -EINVAL;

			filter_mask = nla_get_u32(extfilt);
		}
	}

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
			if (idx >= cb->args[0]) {
				err = br_dev->netdev_ops->ndo_bridge_getlink(
						skb, portid, seq, dev,
						filter_mask, NLM_F_MULTI);
				/* On a real error: break (to return the
				 * partial skb) if something was already
				 * dumped, otherwise fail the dump.
				 */
				if (err < 0 && err != -EOPNOTSUPP) {
					if (likely(skb->len))
						break;

					goto out_err;
				}
			}
			idx++;
		}

		if (ops->ndo_bridge_getlink) {
			if (idx >= cb->args[0]) {
				err = ops->ndo_bridge_getlink(skb, portid,
							      seq, dev,
							      filter_mask,
							      NLM_F_MULTI);
				if (err < 0 && err != -EOPNOTSUPP) {
					if (likely(skb->len))
						break;

					goto out_err;
				}
			}
			idx++;
		}
	}
	err = skb->len;
out_err:
	rcu_read_unlock();
	cb->args[0] = idx;

	return err;
}
3543
John Fastabend2469ffd2012-10-24 08:13:03 +00003544static inline size_t bridge_nlmsg_size(void)
3545{
3546 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
3547 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
3548 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
3549 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
3550 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
3551 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
3552 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
3553 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
3554 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
3555 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
3556 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
3557}
3558
Roopa Prabhu02dba432015-01-14 20:02:25 -08003559static int rtnl_bridge_notify(struct net_device *dev)
John Fastabend2469ffd2012-10-24 08:13:03 +00003560{
3561 struct net *net = dev_net(dev);
John Fastabend2469ffd2012-10-24 08:13:03 +00003562 struct sk_buff *skb;
3563 int err = -EOPNOTSUPP;
3564
Roopa Prabhu02dba432015-01-14 20:02:25 -08003565 if (!dev->netdev_ops->ndo_bridge_getlink)
3566 return 0;
3567
John Fastabend2469ffd2012-10-24 08:13:03 +00003568 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
3569 if (!skb) {
3570 err = -ENOMEM;
3571 goto errout;
3572 }
3573
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02003574 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
Roopa Prabhu02dba432015-01-14 20:02:25 -08003575 if (err < 0)
3576 goto errout;
John Fastabend2469ffd2012-10-24 08:13:03 +00003577
Roopa Prabhu59ccaaa2015-01-28 16:23:11 -08003578 if (!skb->len)
3579 goto errout;
3580
John Fastabend2469ffd2012-10-24 08:13:03 +00003581 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
3582 return 0;
3583errout:
3584 WARN_ON(err == -EMSGSIZE);
3585 kfree_skb(skb);
Roopa Prabhu59ccaaa2015-01-28 16:23:11 -08003586 if (err)
3587 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
John Fastabend2469ffd2012-10-24 08:13:03 +00003588 return err;
3589}
3590
/* RTM_SETLINK (AF_BRIDGE) handler: apply bridge configuration to the
 * device's master (BRIDGE_FLAGS_MASTER, the default) and/or the device
 * itself (BRIDGE_FLAGS_SELF).  Handled scopes are cleared from the
 * IFLA_BRIDGE_FLAGS attribute in place so the caller's message reflects
 * what was consumed; a successful self-set triggers a notification.
 */
static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
	u16 flags = 0;
	bool have_flags = false;

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
		pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
		return -ENODEV;
	}

	/* Locate IFLA_BRIDGE_FLAGS inside IFLA_AF_SPEC; the loop breaks
	 * so that 'attr' still points at that attribute for the
	 * write-back below.
	 */
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

				have_flags = true;
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
		if (err)
			goto out;

		flags &= ~BRIDGE_FLAGS_MASTER;
	}

	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_setlink)
			err = -EOPNOTSUPP;
		else
			err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
								  flags);
		if (!err) {
			flags &= ~BRIDGE_FLAGS_SELF;

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
	}

	if (have_flags)
		memcpy(nla_data(attr), &flags, sizeof(flags));
out:
	return err;
}
3665
David Ahernc21ef3e2017-04-16 09:48:24 -07003666static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3667 struct netlink_ext_ack *extack)
Vlad Yasevich407af322013-02-13 12:00:12 +00003668{
3669 struct net *net = sock_net(skb->sk);
3670 struct ifinfomsg *ifm;
3671 struct net_device *dev;
3672 struct nlattr *br_spec, *attr = NULL;
3673 int rem, err = -EOPNOTSUPP;
Rosen, Rami4de8b412015-01-19 11:45:04 +02003674 u16 flags = 0;
Vlad Yasevich407af322013-02-13 12:00:12 +00003675 bool have_flags = false;
3676
3677 if (nlmsg_len(nlh) < sizeof(*ifm))
3678 return -EINVAL;
3679
3680 ifm = nlmsg_data(nlh);
3681 if (ifm->ifi_family != AF_BRIDGE)
3682 return -EPFNOSUPPORT;
3683
3684 dev = __dev_get_by_index(net, ifm->ifi_index);
3685 if (!dev) {
3686 pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
3687 return -ENODEV;
3688 }
3689
3690 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3691 if (br_spec) {
3692 nla_for_each_nested(attr, br_spec, rem) {
3693 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
Thomas Graf6e8d1c52014-11-26 13:42:16 +01003694 if (nla_len(attr) < sizeof(flags))
3695 return -EINVAL;
3696
Vlad Yasevich407af322013-02-13 12:00:12 +00003697 have_flags = true;
3698 flags = nla_get_u16(attr);
3699 break;
3700 }
3701 }
3702 }
3703
Vlad Yasevich407af322013-02-13 12:00:12 +00003704 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
3705 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3706
3707 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
3708 err = -EOPNOTSUPP;
3709 goto out;
3710 }
3711
Roopa Prabhuadd511b2015-01-29 22:40:12 -08003712 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
Vlad Yasevich407af322013-02-13 12:00:12 +00003713 if (err)
3714 goto out;
3715
3716 flags &= ~BRIDGE_FLAGS_MASTER;
3717 }
3718
3719 if ((flags & BRIDGE_FLAGS_SELF)) {
3720 if (!dev->netdev_ops->ndo_bridge_dellink)
3721 err = -EOPNOTSUPP;
3722 else
Roopa Prabhuadd511b2015-01-29 22:40:12 -08003723 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
3724 flags);
Vlad Yasevich407af322013-02-13 12:00:12 +00003725
Roopa Prabhu02dba432015-01-14 20:02:25 -08003726 if (!err) {
Vlad Yasevich407af322013-02-13 12:00:12 +00003727 flags &= ~BRIDGE_FLAGS_SELF;
Roopa Prabhu02dba432015-01-14 20:02:25 -08003728
3729 /* Generate event to notify upper layer of bridge
3730 * change
3731 */
3732 err = rtnl_bridge_notify(dev);
3733 }
Vlad Yasevich407af322013-02-13 12:00:12 +00003734 }
3735
3736 if (have_flags)
3737 memcpy(nla_data(attr), &flags, sizeof(flags));
Vlad Yasevich407af322013-02-13 12:00:12 +00003738out:
3739 return err;
3740}
3741
Nikolay Aleksandrove8872a22016-04-30 10:25:26 +02003742static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
3743{
3744 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
3745 (!idxattr || idxattr == attrid);
3746}
3747
Nogah Frankel69ae6ad2016-09-16 15:05:37 +02003748#define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
3749static int rtnl_get_offload_stats_attr_size(int attr_id)
3750{
3751 switch (attr_id) {
3752 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
3753 return sizeof(struct rtnl_link_stats64);
3754 }
3755
3756 return 0;
3757}
3758
/* Fill all supported IFLA_OFFLOAD_XSTATS_* attributes for @dev into @skb.
 *
 * *prividx carries dump-resume state: attribute ids below it were emitted
 * by an earlier pass and are skipped; on success it is reset to 0, on
 * failure it records the attribute id to resume from.
 *
 * Returns 0 on success, -ENODATA when the device provides no offload
 * stats at all (callers use this to cancel the enclosing nest),
 * -EMSGSIZE when @skb ran out of room, or the ndo_get_offload_stats()
 * error.
 */
static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
				  int *prividx)
{
	struct nlattr *attr = NULL;
	int attr_id, size;
	void *attr_data;
	int err;

	/* Both callbacks are required: one to probe, one to fetch. */
	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
	      dev->netdev_ops->ndo_get_offload_stats))
		return -ENODATA;

	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
		if (attr_id < *prividx)
			continue;	/* already sent in an earlier pass */

		size = rtnl_get_offload_stats_attr_size(attr_id);
		if (!size)
			continue;	/* id unknown to this kernel */

		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
			continue;	/* device doesn't provide this one */

		attr = nla_reserve_64bit(skb, attr_id, size,
					 IFLA_OFFLOAD_XSTATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		/* Zero the payload first; the driver fills it in place. */
		attr_data = nla_data(attr);
		memset(attr_data, 0, size);
		err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
							     attr_data);
		if (err)
			goto get_offload_stats_failure;
	}

	/* No attribute was emitted at all -> report "no data". */
	if (!attr)
		return -ENODATA;

	*prividx = 0;
	return 0;

nla_put_failure:
	err = -EMSGSIZE;
get_offload_stats_failure:
	/* Remember where to resume on the next dump pass. */
	*prividx = attr_id;
	return err;
}
3808
3809static int rtnl_get_offload_stats_size(const struct net_device *dev)
3810{
3811 int nla_size = 0;
3812 int attr_id;
3813 int size;
3814
3815 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
3816 dev->netdev_ops->ndo_get_offload_stats))
3817 return 0;
3818
3819 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
3820 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
Or Gerlitz3df5b3c2016-11-22 23:09:54 +02003821 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
Nogah Frankel69ae6ad2016-09-16 15:05:37 +02003822 continue;
3823 size = rtnl_get_offload_stats_attr_size(attr_id);
3824 nla_size += nla_total_size_64bit(size);
3825 }
3826
3827 if (nla_size != 0)
3828 nla_size += nla_total_size(0);
3829
3830 return nla_size;
3831}
3832
/* Build one RTM_NEWSTATS message for @dev into @skb.
 *
 * @filter_mask selects which IFLA_STATS_* attributes to include.
 * @idxattr / @prividx carry resume state across dump passes: *idxattr is
 * the top-level attribute we were inside when the previous pass ran out
 * of room (0 when starting fresh), *prividx is that attribute's internal
 * position. stats_attr_valid() uses *idxattr to skip attributes that
 * were already fully emitted.
 *
 * Returns 0 on success or -EMSGSIZE; for NLM_F_MULTI messages that made
 * progress the partially-filled message is kept so the dump can resume.
 */
static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
			       int type, u32 pid, u32 seq, u32 change,
			       unsigned int flags, unsigned int filter_mask,
			       int *idxattr, int *prividx)
{
	struct if_stats_msg *ifsm;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	int s_prividx = *prividx;	/* to detect progress on failure */
	int err;

	ASSERT_RTNL();

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
	if (!nlh)
		return -EMSGSIZE;

	ifsm = nlmsg_data(nlh);
	ifsm->ifindex = dev->ifindex;
	ifsm->filter_mask = filter_mask;

	/* Plain 64-bit link stats. */
	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
		struct rtnl_link_stats64 *sp;

		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
					 sizeof(struct rtnl_link_stats64),
					 IFLA_STATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		sp = nla_data(attr);
		dev_get_stats(dev, sp);
	}

	/* Link-type-specific extended stats (from dev's own link ops). */
	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;

		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	/* Extended stats provided by dev's master device (e.g. bridge). */
	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
			     *idxattr)) {
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		master = netdev_master_upper_dev_get(dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS_SLAVE);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	/* Hardware-offloaded stats; -ENODATA just cancels the nest. */
	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
			     *idxattr)) {
		*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
		attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
		if (!attr)
			goto nla_put_failure;

		err = rtnl_get_offload_stats(skb, dev, prividx);
		if (err == -ENODATA)
			nla_nest_cancel(skb, attr);
		else
			nla_nest_end(skb, attr);

		if (err && err != -ENODATA)
			goto nla_put_failure;
		*idxattr = 0;
	}

	/* Per-address-family stats: one nested AF_* attr per registered
	 * af_ops that implements fill_stats_af().
	 */
	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
		struct rtnl_af_ops *af_ops;

		*idxattr = IFLA_STATS_AF_SPEC;
		attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
		if (!attr)
			goto nla_put_failure;

		list_for_each_entry(af_ops, &rtnl_af_ops, list) {
			if (af_ops->fill_stats_af) {
				struct nlattr *af;
				int err;

				af = nla_nest_start(skb, af_ops->family);
				if (!af)
					goto nla_put_failure;

				err = af_ops->fill_stats_af(skb, dev);

				if (err == -ENODATA)
					nla_nest_cancel(skb, af);
				else if (err < 0)
					goto nla_put_failure;

				nla_nest_end(skb, af);
			}
		}

		nla_nest_end(skb, attr);

		*idxattr = 0;
	}

	nlmsg_end(skb, nlh);

	return 0;

nla_put_failure:
	/* not a multi message or no progress mean a real error */
	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
		nlmsg_cancel(skb, nlh);
	else
		nlmsg_end(skb, nlh);

	return -EMSGSIZE;
}
3972
Roopa Prabhu10c9ead2016-04-20 08:43:43 -07003973static size_t if_nlmsg_stats_size(const struct net_device *dev,
3974 u32 filter_mask)
3975{
3976 size_t size = 0;
3977
Nikolay Aleksandrove8872a22016-04-30 10:25:26 +02003978 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
Roopa Prabhu10c9ead2016-04-20 08:43:43 -07003979 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
3980
Nikolay Aleksandrov97a47fa2016-04-30 10:25:27 +02003981 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
3982 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
Nikolay Aleksandrov80e73cc2016-06-28 16:57:05 +02003983 int attr = IFLA_STATS_LINK_XSTATS;
Nikolay Aleksandrov97a47fa2016-04-30 10:25:27 +02003984
3985 if (ops && ops->get_linkxstats_size) {
Nikolay Aleksandrov80e73cc2016-06-28 16:57:05 +02003986 size += nla_total_size(ops->get_linkxstats_size(dev,
3987 attr));
Nikolay Aleksandrov97a47fa2016-04-30 10:25:27 +02003988 /* for IFLA_STATS_LINK_XSTATS */
3989 size += nla_total_size(0);
3990 }
3991 }
3992
Nikolay Aleksandrov80e73cc2016-06-28 16:57:05 +02003993 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
3994 struct net_device *_dev = (struct net_device *)dev;
3995 const struct rtnl_link_ops *ops = NULL;
3996 const struct net_device *master;
3997
3998 /* netdev_master_upper_dev_get can't take const */
3999 master = netdev_master_upper_dev_get(_dev);
4000 if (master)
4001 ops = master->rtnl_link_ops;
4002 if (ops && ops->get_linkxstats_size) {
4003 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
4004
4005 size += nla_total_size(ops->get_linkxstats_size(dev,
4006 attr));
4007 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
4008 size += nla_total_size(0);
4009 }
4010 }
4011
Nogah Frankel69ae6ad2016-09-16 15:05:37 +02004012 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
4013 size += rtnl_get_offload_stats_size(dev);
4014
Robert Shearmanaefb4d42017-01-16 14:16:36 +00004015 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
4016 struct rtnl_af_ops *af_ops;
4017
4018 /* for IFLA_STATS_AF_SPEC */
4019 size += nla_total_size(0);
4020
4021 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
4022 if (af_ops->get_stats_af_size) {
4023 size += nla_total_size(
4024 af_ops->get_stats_af_size(dev));
4025
4026 /* for AF_* */
4027 size += nla_total_size(0);
4028 }
4029 }
4030 }
4031
Roopa Prabhu10c9ead2016-04-20 08:43:43 -07004032 return size;
4033}
4034
David Ahernc21ef3e2017-04-16 09:48:24 -07004035static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
4036 struct netlink_ext_ack *extack)
Roopa Prabhu10c9ead2016-04-20 08:43:43 -07004037{
4038 struct net *net = sock_net(skb->sk);
Roopa Prabhu10c9ead2016-04-20 08:43:43 -07004039 struct net_device *dev = NULL;
Nikolay Aleksandrove8872a22016-04-30 10:25:26 +02004040 int idxattr = 0, prividx = 0;
4041 struct if_stats_msg *ifsm;
Roopa Prabhu10c9ead2016-04-20 08:43:43 -07004042 struct sk_buff *nskb;
4043 u32 filter_mask;
4044 int err;
4045
Mathias Krause4775cc12016-12-28 17:52:15 +01004046 if (nlmsg_len(nlh) < sizeof(*ifsm))
4047 return -EINVAL;
4048
Roopa Prabhu10c9ead2016-04-20 08:43:43 -07004049 ifsm = nlmsg_data(nlh);
4050 if (ifsm->ifindex > 0)
4051 dev = __dev_get_by_index(net, ifsm->ifindex);
4052 else
4053 return -EINVAL;
4054
4055 if (!dev)
4056 return -ENODEV;
4057
4058 filter_mask = ifsm->filter_mask;
4059 if (!filter_mask)
4060 return -EINVAL;
4061
4062 nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
4063 if (!nskb)
4064 return -ENOBUFS;
4065
4066 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
4067 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
Nikolay Aleksandrove8872a22016-04-30 10:25:26 +02004068 0, filter_mask, &idxattr, &prividx);
Roopa Prabhu10c9ead2016-04-20 08:43:43 -07004069 if (err < 0) {
4070 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
4071 WARN_ON(err == -EMSGSIZE);
4072 kfree_skb(nskb);
4073 } else {
4074 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
4075 }
4076
4077 return err;
4078}
4079
/* RTM_GETSTATS dumpit handler: emit one RTM_NEWSTATS message per device
 * in the namespace.
 *
 * Resume state lives in cb->args:
 *   args[0] = hash bucket, args[1] = device index within bucket,
 *   args[2] = top-level attribute being resumed (idxattr),
 *   args[3] = position inside that attribute (prividx).
 */
static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int h, s_h, err, s_idx, s_idxattr, s_prividx;
	struct net *net = sock_net(skb->sk);
	unsigned int flags = NLM_F_MULTI;
	struct if_stats_msg *ifsm;
	struct hlist_head *head;
	struct net_device *dev;
	u32 filter_mask = 0;
	int idx = 0;

	s_h = cb->args[0];
	s_idx = cb->args[1];
	s_idxattr = cb->args[2];
	s_prividx = cb->args[3];

	/* Let userspace detect device-list changes between dump passes. */
	cb->seq = net->dev_base_seq;

	if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
		return -EINVAL;

	ifsm = nlmsg_data(cb->nlh);
	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;	/* already dumped */
			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq, 0,
						  flags, filter_mask,
						  &s_idxattr, &s_prividx);
			/* If we ran out of room on the first message,
			 * we're in trouble
			 */
			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));

			if (err < 0)
				goto out;	/* resume here next pass */
			/* Device fully dumped: clear attr-level resume state. */
			s_prividx = 0;
			s_idxattr = 0;
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	cb->args[3] = s_prividx;
	cb->args[2] = s_idxattr;
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}
4139
/* Process one rtnetlink message.
 *
 * Looks up the (family, type) handler pair under RCU, falling back to
 * PF_UNSPEC when the family has no table. GET+NLM_F_DUMP requests start
 * a netlink dump; doit handlers run either without the rtnl lock (when
 * registered with RTNL_FLAG_DOIT_UNLOCKED, pinned via the per-family
 * refcount) or under rtnl_lock().
 */
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *handlers;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int kind;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	/* RTM_* values come in groups of four: NEW/DEL/GET/SET. */
	kind = type&3;

	/* Only GET (kind == 2) is allowed without CAP_NET_ADMIN. */
	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (family >= ARRAY_SIZE(rtnl_msg_handlers))
		family = PF_UNSPEC;

	rcu_read_lock();
	handlers = rcu_dereference(rtnl_msg_handlers[family]);
	if (!handlers) {
		/* No table for this family: fall back to PF_UNSPEC.
		 * NOTE(review): presumably the PF_UNSPEC table is always
		 * registered by rtnetlink_init() before traffic arrives,
		 * otherwise handlers could still be NULL below — confirm.
		 */
		family = PF_UNSPEC;
		handlers = rcu_dereference(rtnl_msg_handlers[family]);
	}

	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u16 min_dump_alloc = 0;

		dumpit = READ_ONCE(handlers[type].dumpit);
		if (!dumpit) {
			/* Retry with the generic PF_UNSPEC dumpers. */
			family = PF_UNSPEC;
			handlers = rcu_dereference(rtnl_msg_handlers[PF_UNSPEC]);
			if (!handlers)
				goto err_unlock;

			dumpit = READ_ONCE(handlers[type].dumpit);
			if (!dumpit)
				goto err_unlock;
		}

		/* Pin the handler table: the dump runs after we drop RCU. */
		refcount_inc(&rtnl_msg_handlers_ref[family]);

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		rcu_read_unlock();

		rtnl = net->rtnl;
		{
			struct netlink_dump_control c = {
				.dump = dumpit,
				.min_dump_alloc = min_dump_alloc,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
		}
		refcount_dec(&rtnl_msg_handlers_ref[family]);
		return err;
	}

	flags = READ_ONCE(handlers[type].flags);
	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		/* Lockless doit: pin the table, drop RCU, then call. */
		refcount_inc(&rtnl_msg_handlers_ref[family]);
		doit = READ_ONCE(handlers[type].doit);
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		refcount_dec(&rtnl_msg_handlers_ref[family]);
		return err;
	}

	rcu_read_unlock();

	/* Default path: run the doit handler under the rtnl mutex,
	 * re-fetching the table since RCU protection was dropped.
	 */
	rtnl_lock();
	handlers = rtnl_dereference(rtnl_msg_handlers[family]);
	if (handlers) {
		doit = READ_ONCE(handlers[type].doit);
		if (doit)
			err = doit(skb, nlh, extack);
	}
	rtnl_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}
4243
/* Netlink input callback for NETLINK_ROUTE sockets: feed each message
 * in @skb through rtnetlink_rcv_msg() via netlink_rcv_skb(), which also
 * takes care of error/ACK replies.
 */
static void rtnetlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
}
4248
Julien Gomes5f729ea2017-06-20 13:54:16 -07004249static int rtnetlink_bind(struct net *net, int group)
4250{
4251 switch (group) {
4252 case RTNLGRP_IPV4_MROUTE_R:
4253 case RTNLGRP_IPV6_MROUTE_R:
4254 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4255 return -EPERM;
4256 break;
4257 }
4258 return 0;
4259}
4260
/* Netdev notifier callback: translate selected device events into an
 * RTM_NEWLINK notification (rtmsg_ifinfo_event) so userspace listeners
 * see the change. All other events are ignored.
 */
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}
4282
/* Registered with the netdev notifier chain in rtnetlink_init(). */
static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call = rtnetlink_event,
};
4286
Denis V. Lunev97c53ca2007-11-19 22:26:51 -08004287
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00004288static int __net_init rtnetlink_net_init(struct net *net)
Denis V. Lunev97c53ca2007-11-19 22:26:51 -08004289{
4290 struct sock *sk;
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00004291 struct netlink_kernel_cfg cfg = {
4292 .groups = RTNLGRP_MAX,
4293 .input = rtnetlink_rcv,
4294 .cb_mutex = &rtnl_mutex,
Pablo Neira Ayuso9785e102012-09-08 02:53:53 +00004295 .flags = NL_CFG_F_NONROOT_RECV,
Julien Gomes5f729ea2017-06-20 13:54:16 -07004296 .bind = rtnetlink_bind,
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +00004297 };
4298
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +00004299 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
Denis V. Lunev97c53ca2007-11-19 22:26:51 -08004300 if (!sk)
4301 return -ENOMEM;
Denis V. Lunev97c53ca2007-11-19 22:26:51 -08004302 net->rtnl = sk;
4303 return 0;
4304}
4305
/* Per-netns teardown: release the NETLINK_ROUTE kernel socket created
 * by rtnetlink_net_init() and clear the stale pointer.
 */
static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}
4311
/* Per-network-namespace setup/teardown of the rtnetlink socket. */
static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};
4316
/* Boot-time init: create the per-netns NETLINK_ROUTE sockets, hook the
 * netdev notifier chain, and register the core PF_UNSPEC / PF_BRIDGE
 * message handlers. Failure to set up the pernet subsystem is fatal.
 */
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	/* Link management. */
	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	/* Generic dump-all fallbacks that fan out across families. */
	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	/* Bridge FDB management. */
	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, 0);

	/* Bridge port link management. */
	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	/* Interface statistics (RTM_GETSTATS). */
	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
}