/*
 * NET3	Protocol independent device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/


/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets is
 *	first on the list, it cannot sense that the packet is cloned and
 *	should be copied-on-write, so it will change the packet and
 *	subsequent readers will get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
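
/* Usage sketch (illustrative, not from this file): a minimal tap that
 * registers for every protocol. The names my_tap_rcv and my_tap are
 * hypothetical; the handler owns the skb it is handed and must free it.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		pr_info("tap: %u bytes on %s\n", skb->len, dev->name);
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *
 * A module would call dev_add_pack(&my_tap) from its init path and
 * dev_remove_pack(&my_tap) from its exit path; only the latter may sleep.
 */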

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
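
/* Usage sketch (hedged, not from this file): how a protocol registers
 * GRO/GSO offload callbacks. IPv4 does this in net/ipv4/af_inet.c with
 * inet_gso_segment()/inet_gro_receive()/inet_gro_complete(); the names
 * below (my_gso_segment etc.) are placeholders with the same roles.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment  = my_gso_segment,
 *			.gro_receive  = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 *
 * The priority field sorted on above defaults to 0; lower values land
 * earlier in offload_base and are matched first.
 */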
495
496/**
497 * __dev_remove_offload - remove offload handler
498 * @po: packet offload declaration
499 *
500 * Remove a protocol offload handler that was previously added to the
501 * kernel offload handlers by dev_add_offload(). The passed &offload_type
502 * is removed from the kernel lists and can be freed or reused once this
503 * function returns.
504 *
505 * The packet type might still be in use by receivers
506 * and must not be freed until after all the CPU's have gone
507 * through a quiescent state.
508 */
stephen hemminger1d143d92013-12-29 14:01:29 -0800509static void __dev_remove_offload(struct packet_offload *po)
Vlad Yasevich62532da2012-11-15 08:49:10 +0000510{
511 struct list_head *head = &offload_base;
512 struct packet_offload *po1;
513
Eric Dumazetc53aa502012-11-16 08:08:23 +0000514 spin_lock(&offload_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000515
516 list_for_each_entry(po1, head, list) {
517 if (po == po1) {
518 list_del_rcu(&po->list);
519 goto out;
520 }
521 }
522
523 pr_warn("dev_remove_offload: %p not found\n", po);
524out:
Eric Dumazetc53aa502012-11-16 08:08:23 +0000525 spin_unlock(&offload_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000526}
Vlad Yasevich62532da2012-11-15 08:49:10 +0000527
528/**
529 * dev_remove_offload - remove packet offload handler
530 * @po: packet offload declaration
531 *
532 * Remove a packet offload handler that was previously added to the kernel
533 * offload handlers by dev_add_offload(). The passed &offload_type is
534 * removed from the kernel lists and can be freed or reused once this
535 * function returns.
536 *
537 * This call sleeps to guarantee that no CPU is looking at the packet
538 * type after return.
539 */
540void dev_remove_offload(struct packet_offload *po)
541{
542 __dev_remove_offload(po);
543
544 synchronize_net();
545}
546EXPORT_SYMBOL(dev_remove_offload);
547
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548/******************************************************************************
tchardingeb13da12017-02-09 17:56:06 +1100549 *
550 * Device Boot-time Settings Routines
551 *
552 ******************************************************************************/
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553
554/* Boot time configuration table */
555static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
556
557/**
558 * netdev_boot_setup_add - add new setup entry
559 * @name: name of the device
560 * @map: configured settings for the device
561 *
562 * Adds new setup entry to the dev_boot_setup list. The function
563 * returns 0 on error and 1 on success. This is a generic routine to
564 * all netdevices.
565 */
566static int netdev_boot_setup_add(char *name, struct ifmap *map)
567{
568 struct netdev_boot_setup *s;
569 int i;
570
571 s = dev_boot_setup;
572 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
573 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
574 memset(s[i].name, 0, sizeof(s[i].name));
Wang Chen93b3cff2008-07-01 19:57:19 -0700575 strlcpy(s[i].name, name, IFNAMSIZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576 memcpy(&s[i].map, map, sizeof(s[i].map));
577 break;
578 }
579 }
580
581 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
582}
583
584/**
tcharding722c9a02017-02-09 17:56:04 +1100585 * netdev_boot_setup_check - check boot time settings
586 * @dev: the netdevice
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587 *
tcharding722c9a02017-02-09 17:56:04 +1100588 * Check boot time settings for the device.
589 * The found settings are set for the device to be used
590 * later in the device probing.
591 * Returns 0 if no settings found, 1 if they are.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 */
593int netdev_boot_setup_check(struct net_device *dev)
594{
595 struct netdev_boot_setup *s = dev_boot_setup;
596 int i;
597
598 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
599 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
Wang Chen93b3cff2008-07-01 19:57:19 -0700600 !strcmp(dev->name, s[i].name)) {
tcharding722c9a02017-02-09 17:56:04 +1100601 dev->irq = s[i].map.irq;
602 dev->base_addr = s[i].map.base_addr;
603 dev->mem_start = s[i].map.mem_start;
604 dev->mem_end = s[i].map.mem_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700605 return 1;
606 }
607 }
608 return 0;
609}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700610EXPORT_SYMBOL(netdev_boot_setup_check);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700611
612
613/**
tcharding722c9a02017-02-09 17:56:04 +1100614 * netdev_boot_base - get address from boot time settings
615 * @prefix: prefix for network device
616 * @unit: id for network device
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617 *
tcharding722c9a02017-02-09 17:56:04 +1100618 * Check boot time settings for the base address of device.
619 * The found settings are set for the device to be used
620 * later in the device probing.
621 * Returns 0 if no settings found.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622 */
623unsigned long netdev_boot_base(const char *prefix, int unit)
624{
625 const struct netdev_boot_setup *s = dev_boot_setup;
626 char name[IFNAMSIZ];
627 int i;
628
629 sprintf(name, "%s%d", prefix, unit);
630
631 /*
632 * If device already registered then return base of 1
633 * to indicate not to probe for this interface
634 */
Eric W. Biederman881d9662007-09-17 11:56:21 -0700635 if (__dev_get_by_name(&init_net, name))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636 return 1;
637
638 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
639 if (!strcmp(name, s[i].name))
640 return s[i].map.base_addr;
641 return 0;
642}
643
644/*
645 * Saves at boot time configured settings for any netdevice.
646 */
647int __init netdev_boot_setup(char *str)
648{
649 int ints[5];
650 struct ifmap map;
651
652 str = get_options(str, ARRAY_SIZE(ints), ints);
653 if (!str || !*str)
654 return 0;
655
656 /* Save settings */
657 memset(&map, 0, sizeof(map));
658 if (ints[0] > 0)
659 map.irq = ints[1];
660 if (ints[0] > 1)
661 map.base_addr = ints[2];
662 if (ints[0] > 2)
663 map.mem_start = ints[3];
664 if (ints[0] > 3)
665 map.mem_end = ints[4];
666
667 /* Add new entry to the list */
668 return netdev_boot_setup_add(str, &map);
669}
670
671__setup("netdev=", netdev_boot_setup);
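
/* Example (illustrative): with the parser above, a kernel command line
 * option such as
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * records irq 9, I/O base 0x300 and the memory window for the device that
 * will later probe as "eth0"; netdev_boot_setup_check() then copies these
 * values into the struct net_device. Trailing numeric fields may be
 * omitted, e.g. "netdev=9,0x300,eth0" sets only irq and base_addr.
 */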

/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
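
/* Usage sketch (illustrative): the two lookup disciplines side by side.
 * The RCU variant is cheap, but the pointer is only valid inside the read
 * section; the refcounted variant survives sleeping but must be paired
 * with dev_put(). do_something_sleepy() is a hypothetical helper.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		mtu = dev->mtu;
 *	rcu_read_unlock();
 *
 *	dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		do_something_sleepy(dev);
 *		dev_put(dev);
 *	}
 */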

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
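
/* Usage sketch (illustrative): resolving an ifindex supplied by user
 * space, e.g. from struct sockaddr_ll.sll_ifindex, taking and dropping a
 * reference around the work; use_the_device() is a hypothetical helper.
 *
 *	struct net_device *dev = dev_get_by_index(net, ifindex);
 *
 *	if (!dev)
 *		return -ENODEV;
 *	use_the_device(dev);
 *	dev_put(dev);
 */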

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
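
/* Usage sketch (illustrative): locating the Ethernet device that owns a
 * known MAC address; the address bytes here are arbitrary.
 *
 *	static const char mac[ETH_ALEN] = {
 *		0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc
 *	};
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		pr_info("%s owns the address\n", dev->name);
 *	rcu_read_unlock();
 */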

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
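
/* With the rules above, names such as "eth0" or "my-vlan.100" are
 * accepted, while "", ".", "..", "a/b", "a:b", anything containing
 * whitespace, and anything of IFNAMSIZ or more characters are rejected.
 */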

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
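
/* Usage sketch (illustrative): a driver that wants classic ethN naming
 * passes a format string and lets the core pick the first free unit; the
 * error label is hypothetical.
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto out_free;
 *
 * On success err holds the unit number chosen (0 for "eth0", etc.) and
 * dev->name has been filled in.
 */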
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001136
Gao feng828de4f2012-09-13 20:58:27 +00001137static int dev_alloc_name_ns(struct net *net,
1138 struct net_device *dev,
1139 const char *name)
Octavian Purdilad9031022009-11-18 02:36:59 +00001140{
Gao feng828de4f2012-09-13 20:58:27 +00001141 char buf[IFNAMSIZ];
1142 int ret;
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001143
Gao feng828de4f2012-09-13 20:58:27 +00001144 ret = __dev_alloc_name(net, name, buf);
1145 if (ret >= 0)
1146 strlcpy(dev->name, buf, IFNAMSIZ);
1147 return ret;
1148}
1149
1150static int dev_get_valid_name(struct net *net,
1151 struct net_device *dev,
1152 const char *name)
1153{
1154 BUG_ON(!net);
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001155
Octavian Purdilad9031022009-11-18 02:36:59 +00001156 if (!dev_valid_name(name))
1157 return -EINVAL;
1158
Jiri Pirko1c5cae82011-04-30 01:21:32 +00001159 if (strchr(name, '%'))
Gao feng828de4f2012-09-13 20:58:27 +00001160 return dev_alloc_name_ns(net, dev, name);
Octavian Purdilad9031022009-11-18 02:36:59 +00001161 else if (__dev_get_by_name(net, name))
1162 return -EEXIST;
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001163 else if (dev->name != name)
1164 strlcpy(dev->name, name, IFNAMSIZ);
Octavian Purdilad9031022009-11-18 02:36:59 +00001165
1166 return 0;
1167}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168
1169/**
1170 * dev_change_name - change name of a device
1171 * @dev: device
1172 * @newname: name (or format string) must be at least IFNAMSIZ
1173 *
1174 * Change name of a device, can pass format strings "eth%d".
1175 * for wildcarding.
1176 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07001177int dev_change_name(struct net_device *dev, const char *newname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178{
Tom Gundersen238fa362014-07-14 16:37:23 +02001179 unsigned char old_assign_type;
Herbert Xufcc5a032007-07-30 17:03:38 -07001180 char oldname[IFNAMSIZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 int err = 0;
Herbert Xufcc5a032007-07-30 17:03:38 -07001182 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001183 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184
1185 ASSERT_RTNL();
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001186 BUG_ON(!dev_net(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001188 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189 if (dev->flags & IFF_UP)
1190 return -EBUSY;
1191
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001192 write_seqcount_begin(&devnet_rename_seq);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001193
1194 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001195 write_seqcount_end(&devnet_rename_seq);
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -07001196 return 0;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001197 }
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -07001198
Herbert Xufcc5a032007-07-30 17:03:38 -07001199 memcpy(oldname, dev->name, IFNAMSIZ);
1200
Gao feng828de4f2012-09-13 20:58:27 +00001201 err = dev_get_valid_name(net, dev, newname);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001202 if (err < 0) {
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001203 write_seqcount_end(&devnet_rename_seq);
Octavian Purdilad9031022009-11-18 02:36:59 +00001204 return err;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001205 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206
Veaceslav Falico6fe82a32014-07-17 20:33:32 +02001207 if (oldname[0] && !strchr(oldname, '%'))
1208 netdev_info(dev, "renamed from %s\n", oldname);
1209
Tom Gundersen238fa362014-07-14 16:37:23 +02001210 old_assign_type = dev->name_assign_type;
1211 dev->name_assign_type = NET_NAME_RENAMED;
1212
Herbert Xufcc5a032007-07-30 17:03:38 -07001213rollback:
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001214 ret = device_rename(&dev->dev, dev->name);
1215 if (ret) {
1216 memcpy(dev->name, oldname, IFNAMSIZ);
Tom Gundersen238fa362014-07-14 16:37:23 +02001217 dev->name_assign_type = old_assign_type;
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001218 write_seqcount_end(&devnet_rename_seq);
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001219 return ret;
Stephen Hemmingerdcc99772008-05-14 22:33:38 -07001220 }
Herbert Xu7f988ea2007-07-30 16:35:46 -07001221
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001222 write_seqcount_end(&devnet_rename_seq);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001223
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01001224 netdev_adjacent_rename_links(dev, oldname);
1225
Herbert Xu7f988ea2007-07-30 16:35:46 -07001226 write_lock_bh(&dev_base_lock);
Eric Dumazet372b2312011-05-17 13:56:59 -04001227 hlist_del_rcu(&dev->name_hlist);
Eric Dumazet72c95282009-10-30 07:11:27 +00001228 write_unlock_bh(&dev_base_lock);
1229
1230 synchronize_rcu();
1231
1232 write_lock_bh(&dev_base_lock);
1233 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
Herbert Xu7f988ea2007-07-30 16:35:46 -07001234 write_unlock_bh(&dev_base_lock);
1235
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001236 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001237 ret = notifier_to_errno(ret);
1238
1239 if (ret) {
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001240 /* err >= 0 after dev_alloc_name(), or it stores the first errno */
1241 if (err >= 0) {
Herbert Xufcc5a032007-07-30 17:03:38 -07001242 err = ret;
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001243 write_seqcount_begin(&devnet_rename_seq);
Herbert Xufcc5a032007-07-30 17:03:38 -07001244 memcpy(dev->name, oldname, IFNAMSIZ);
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01001245 memcpy(oldname, newname, IFNAMSIZ);
Tom Gundersen238fa362014-07-14 16:37:23 +02001246 dev->name_assign_type = old_assign_type;
1247 old_assign_type = NET_NAME_RENAMED;
Herbert Xufcc5a032007-07-30 17:03:38 -07001248 goto rollback;
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001249 } else {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001250 pr_err("%s: name change rollback failed: %d\n",
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001251 dev->name, ret);
Herbert Xufcc5a032007-07-30 17:03:38 -07001252 }
1253 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254
1255 return err;
1256}
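/* Editor's illustrative sketch (not part of dev.c): renaming a device from
 * kernel code. RTNL must be held around dev_change_name(); "mgmt%d" is a
 * hypothetical format string that the '%d' wildcard handling above expands
 * to the first free index.
 */
static int example_rename_device(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_name(dev, "mgmt%d");
	rtnl_unlock();
	return err;
}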
1257
1258/**
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001259 * dev_set_alias - change ifalias of a device
1260 * @dev: device
1261 * @alias: name up to IFALIASZ
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07001262 * @len: limit of bytes to copy from @alias
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001263 *
1264 * Set the ifalias for a device.
1265 */
1266int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1267{
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001268 char *new_ifalias;
1269
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001270 ASSERT_RTNL();
1271
1272 if (len >= IFALIASZ)
1273 return -EINVAL;
1274
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001275 if (!len) {
Sachin Kamat388dfc22012-11-20 00:57:04 +00001276 kfree(dev->ifalias);
1277 dev->ifalias = NULL;
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001278 return 0;
1279 }
1280
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001281 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1282 if (!new_ifalias)
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001283 return -ENOMEM;
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001284 dev->ifalias = new_ifalias;
Alexander Potapenkoc28294b2017-06-06 15:56:54 +02001285 memcpy(dev->ifalias, alias, len);
1286 dev->ifalias[len] = 0;
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001287
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001288 return len;
1289}
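/* Editor's illustrative sketch: storing a descriptive alias on a device.
 * The string is arbitrary; dev_set_alias() runs under RTNL and returns the
 * stored length (or a negative errno), matching the function above.
 */
static int example_set_alias(struct net_device *dev)
{
	static const char note[] = "uplink to core switch";
	int ret;

	rtnl_lock();
	ret = dev_set_alias(dev, note, sizeof(note) - 1);
	rtnl_unlock();
	return ret < 0 ? ret : 0;
}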
1290
1291
1292/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001293 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001294 * @dev: device to cause notification
1295 *
1296 * Called to indicate a device has changed features.
1297 */
1298void netdev_features_change(struct net_device *dev)
1299{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001300 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001301}
1302EXPORT_SYMBOL(netdev_features_change);
1303
1304/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 * netdev_state_change - device changes state
1306 * @dev: device to cause notification
1307 *
1308 * Called to indicate a device has changed state. This function calls
1309 * the netdev_chain notifier chain and sends a NEWLINK message
1310 * to the routing socket.
1311 */
1312void netdev_state_change(struct net_device *dev)
1313{
1314 if (dev->flags & IFF_UP) {
Loic Prylli54951192014-07-01 21:39:43 -07001315 struct netdev_notifier_change_info change_info;
1316
1317 change_info.flags_changed = 0;
1318 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1319 &change_info.info);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001320 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321 }
1322}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001323EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324
Amerigo Wangee89bab2012-08-09 22:14:56 +00001325/**
tcharding722c9a02017-02-09 17:56:04 +11001326 * netdev_notify_peers - notify network peers about existence of @dev
1327 * @dev: network device
Amerigo Wangee89bab2012-08-09 22:14:56 +00001328 *
1329 * Generate traffic such that interested network peers are aware of
1330 * @dev, such as by generating a gratuitous ARP. This may be used when
1331 * a device wants to inform the rest of the network about some sort of
1332 * reconfiguration such as a failover event or virtual machine
1333 * migration.
1334 */
1335void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001336{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001337 rtnl_lock();
1338 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
Vlad Yasevich37c343b2017-03-14 08:58:08 -04001339 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
Amerigo Wangee89bab2012-08-09 22:14:56 +00001340 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001341}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001342EXPORT_SYMBOL(netdev_notify_peers);
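/* Editor's illustrative sketch: a virtualization driver announcing that a
 * device's traffic has moved, e.g. after a failover. Note that
 * netdev_notify_peers() takes RTNL itself, so it must be called from
 * process context without RTNL held; the driver name is hypothetical.
 */
static void exampledrv_failover_complete(struct net_device *dev)
{
	/* emits gratuitous ARP and resends IGMP via the notifiers above */
	netdev_notify_peers(dev);
}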
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001343
Patrick McHardybd380812010-02-26 06:34:53 +00001344static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001346 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001347 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001349 ASSERT_RTNL();
1350
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 if (!netif_device_present(dev))
1352 return -ENODEV;
1353
Neil Hormanca99ca12013-02-05 08:05:43 +00001354 /* Block netpoll from trying to do any rx path servicing.
1355 * If we don't do this, there is a chance ndo_poll_controller
1356 * or ndo_poll may be running while we open the device.
1357 */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001358 netpoll_poll_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001359
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001360 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1361 ret = notifier_to_errno(ret);
1362 if (ret)
1363 return ret;
1364
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001366
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001367 if (ops->ndo_validate_addr)
1368 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001369
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001370 if (!ret && ops->ndo_open)
1371 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372
Eric W. Biederman66b55522014-03-27 15:39:03 -07001373 netpoll_poll_enable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001374
Jeff Garzikbada3392007-10-23 20:19:37 -07001375 if (ret)
1376 clear_bit(__LINK_STATE_START, &dev->state);
1377 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 dev->flags |= IFF_UP;
Patrick McHardy4417da62007-06-27 01:28:10 -07001379 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001381 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001383
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 return ret;
1385}
Patrick McHardybd380812010-02-26 06:34:53 +00001386
1387/**
1388 * dev_open - prepare an interface for use.
1389 * @dev: device to open
1390 *
1391 * Takes a device from down to up state. The device's private open
1392 * function is invoked and then the multicast lists are loaded. Finally
1393 * the device is moved into the up state and a %NETDEV_UP message is
1394 * sent to the netdev notifier chain.
1395 *
1396 * Calling this function on an active interface is a nop. On a failure
1397 * a negative errno code is returned.
1398 */
1399int dev_open(struct net_device *dev)
1400{
1401 int ret;
1402
Patrick McHardybd380812010-02-26 06:34:53 +00001403 if (dev->flags & IFF_UP)
1404 return 0;
1405
Patrick McHardybd380812010-02-26 06:34:53 +00001406 ret = __dev_open(dev);
1407 if (ret < 0)
1408 return ret;
1409
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001410 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Patrick McHardybd380812010-02-26 06:34:53 +00001411 call_netdevice_notifiers(NETDEV_UP, dev);
1412
1413 return ret;
1414}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001415EXPORT_SYMBOL(dev_open);
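/* Editor's illustrative sketch: bringing an interface up from kernel code,
 * much like "ip link set <ifname> up". The lookup by name is an assumption
 * for this example; __dev_get_by_name() and dev_open() both require RTNL.
 */
static int example_bring_up(struct net *net, const char *ifname)
{
	struct net_device *dev;
	int err = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(net, ifname);
	if (dev)
		err = dev_open(dev);	/* nop if already IFF_UP */
	rtnl_unlock();
	return err;
}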
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416
stephen hemminger7051b882017-07-18 15:59:27 -07001417static void __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418{
Octavian Purdila44345722010-12-13 12:44:07 +00001419 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001420
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001421 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001422 might_sleep();
1423
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001424 list_for_each_entry(dev, head, close_list) {
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001425 /* Temporarily disable netpoll until the interface is down */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001426 netpoll_poll_disable(dev);
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001427
Octavian Purdila44345722010-12-13 12:44:07 +00001428 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
Octavian Purdila44345722010-12-13 12:44:07 +00001430 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431
Octavian Purdila44345722010-12-13 12:44:07 +00001432 /* Synchronize to scheduled poll. We cannot touch the poll list; it
1433 * can even be on a different CPU. So just clear netif_running().
1434 *
1435 * dev->stop() will invoke napi_disable() on all of its
1436 * napi_struct instances on this device.
1437 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001438 smp_mb__after_atomic(); /* Commit netif_running(). */
Octavian Purdila44345722010-12-13 12:44:07 +00001439 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440
Octavian Purdila44345722010-12-13 12:44:07 +00001441 dev_deactivate_many(head);
1442
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001443 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001444 const struct net_device_ops *ops = dev->netdev_ops;
1445
1446 /*
1447 * Call the device-specific close. This cannot fail and is
1448 * only done if the device is UP.
1449 *
1450 * We allow it to be called even after a DETACH hot-plug
1451 * event.
1452 */
1453 if (ops->ndo_stop)
1454 ops->ndo_stop(dev);
1455
Octavian Purdila44345722010-12-13 12:44:07 +00001456 dev->flags &= ~IFF_UP;
Eric W. Biederman66b55522014-03-27 15:39:03 -07001457 netpoll_poll_enable(dev);
Octavian Purdila44345722010-12-13 12:44:07 +00001458 }
Octavian Purdila44345722010-12-13 12:44:07 +00001459}
1460
stephen hemminger7051b882017-07-18 15:59:27 -07001461static void __dev_close(struct net_device *dev)
Octavian Purdila44345722010-12-13 12:44:07 +00001462{
1463 LIST_HEAD(single);
1464
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001465 list_add(&dev->close_list, &single);
stephen hemminger7051b882017-07-18 15:59:27 -07001466 __dev_close_many(&single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001467 list_del(&single);
Octavian Purdila44345722010-12-13 12:44:07 +00001468}
1469
stephen hemminger7051b882017-07-18 15:59:27 -07001470void dev_close_many(struct list_head *head, bool unlink)
Octavian Purdila44345722010-12-13 12:44:07 +00001471{
1472 struct net_device *dev, *tmp;
Octavian Purdila44345722010-12-13 12:44:07 +00001473
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001474 /* Remove the devices that don't need to be closed */
1475 list_for_each_entry_safe(dev, tmp, head, close_list)
Octavian Purdila44345722010-12-13 12:44:07 +00001476 if (!(dev->flags & IFF_UP))
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001477 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001478
1479 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001480
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001481 list_for_each_entry_safe(dev, tmp, head, close_list) {
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001482 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Octavian Purdila44345722010-12-13 12:44:07 +00001483 call_netdevice_notifiers(NETDEV_DOWN, dev);
David S. Miller99c4a262015-03-18 22:52:33 -04001484 if (unlink)
1485 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001486 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487}
David S. Miller99c4a262015-03-18 22:52:33 -04001488EXPORT_SYMBOL(dev_close_many);
Patrick McHardybd380812010-02-26 06:34:53 +00001489
1490/**
1491 * dev_close - shutdown an interface.
1492 * @dev: device to shutdown
1493 *
1494 * This function moves an active device into down state. A
1495 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1496 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1497 * chain.
1498 */
stephen hemminger7051b882017-07-18 15:59:27 -07001499void dev_close(struct net_device *dev)
Patrick McHardybd380812010-02-26 06:34:53 +00001500{
Eric Dumazete14a5992011-05-10 12:26:06 -07001501 if (dev->flags & IFF_UP) {
1502 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001503
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001504 list_add(&dev->close_list, &single);
David S. Miller99c4a262015-03-18 22:52:33 -04001505 dev_close_many(&single, true);
Eric Dumazete14a5992011-05-10 12:26:06 -07001506 list_del(&single);
1507 }
Patrick McHardybd380812010-02-26 06:34:53 +00001508}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001509EXPORT_SYMBOL(dev_close);
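/* Editor's illustrative sketch: closing a group of slave devices in one
 * batch with dev_close_many(), as a bonding-like driver might on teardown.
 * The slave bookkeeping structure is hypothetical; only the close_list
 * usage mirrors dev_close() above.
 */
struct example_slave {
	struct net_device *dev;
	struct list_head node;
};

static void example_close_slaves(struct list_head *slaves)
{
	struct example_slave *s;
	LIST_HEAD(close_list);

	ASSERT_RTNL();
	list_for_each_entry(s, slaves, node)
		if (s->dev->flags & IFF_UP)
			list_add(&s->dev->close_list, &close_list);
	dev_close_many(&close_list, true);	/* unlink=true empties the list */
}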
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510
1511
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001512/**
1513 * dev_disable_lro - disable Large Receive Offload on a device
1514 * @dev: device
1515 *
1516 * Disable Large Receive Offload (LRO) on a net device. Must be
1517 * called under RTNL. This is needed if received packets may be
1518 * forwarded to another interface.
1519 */
1520void dev_disable_lro(struct net_device *dev)
1521{
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001522 struct net_device *lower_dev;
1523 struct list_head *iter;
Michal Kubeček529d0482013-11-15 06:18:50 +01001524
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001525 dev->wanted_features &= ~NETIF_F_LRO;
1526 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001527
Michał Mirosław22d59692011-04-21 12:42:15 +00001528 if (unlikely(dev->features & NETIF_F_LRO))
1529 netdev_WARN(dev, "failed to disable LRO!\n");
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001530
1531 netdev_for_each_lower_dev(dev, lower_dev, iter)
1532 dev_disable_lro(lower_dev);
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001533}
1534EXPORT_SYMBOL(dev_disable_lro);
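/* Editor's illustrative sketch: a forwarding setup must not receive
 * LRO-merged super-packets, so a hypothetical master driver disables LRO
 * on each port it enslaves. Per the comment above this requires RTNL and
 * recurses into lower devices automatically.
 */
static void example_enslave_port(struct net_device *port)
{
	ASSERT_RTNL();
	dev_disable_lro(port);	/* clears NETIF_F_LRO from wanted_features */
}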
1535
Jiri Pirko351638e2013-05-28 01:30:21 +00001536static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1537 struct net_device *dev)
1538{
1539 struct netdev_notifier_info info;
1540
1541 netdev_notifier_info_init(&info, dev);
1542 return nb->notifier_call(nb, val, &info);
1543}
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001544
Eric W. Biederman881d9662007-09-17 11:56:21 -07001545static int dev_boot_phase = 1;
1546
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547/**
tcharding722c9a02017-02-09 17:56:04 +11001548 * register_netdevice_notifier - register a network notifier block
1549 * @nb: notifier
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 *
tcharding722c9a02017-02-09 17:56:04 +11001551 * Register a notifier to be called when network device events occur.
1552 * The notifier passed is linked into the kernel structures and must
1553 * not be reused until it has been unregistered. A negative errno code
1554 * is returned on a failure.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 *
tcharding722c9a02017-02-09 17:56:04 +11001556 * When registered, all registration and up events are replayed
1557 * to the new notifier to allow the caller to have a race-free
1558 * view of the network device list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 */
1560
1561int register_netdevice_notifier(struct notifier_block *nb)
1562{
1563 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001564 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001565 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 int err;
1567
1568 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001569 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001570 if (err)
1571 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001572 if (dev_boot_phase)
1573 goto unlock;
1574 for_each_net(net) {
1575 for_each_netdev(net, dev) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001576 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001577 err = notifier_to_errno(err);
1578 if (err)
1579 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580
Eric W. Biederman881d9662007-09-17 11:56:21 -07001581 if (!(dev->flags & IFF_UP))
1582 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001583
Jiri Pirko351638e2013-05-28 01:30:21 +00001584 call_netdevice_notifier(nb, NETDEV_UP, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001585 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001587
1588unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 rtnl_unlock();
1590 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001591
1592rollback:
1593 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001594 for_each_net(net) {
1595 for_each_netdev(net, dev) {
1596 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001597 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001598
Eric W. Biederman881d9662007-09-17 11:56:21 -07001599 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001600 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1601 dev);
1602 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001603 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001604 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001605 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001606 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001607
RongQing.Li8f891482011-11-30 23:43:07 -05001608outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001609 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001610 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001612EXPORT_SYMBOL(register_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613
1614/**
tcharding722c9a02017-02-09 17:56:04 +11001615 * unregister_netdevice_notifier - unregister a network notifier block
1616 * @nb: notifier
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 *
tcharding722c9a02017-02-09 17:56:04 +11001618 * Unregister a notifier previously registered by
1619 * register_netdevice_notifier(). The notifier is unlinked from the
1620 * kernel structures and may then be reused. A negative errno code
1621 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001622 *
tcharding722c9a02017-02-09 17:56:04 +11001623 * After unregistering, unregister and down device events are synthesized
1624 * for all devices on the device list to the removed notifier to remove
1625 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 */
1627
1628int unregister_netdevice_notifier(struct notifier_block *nb)
1629{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001630 struct net_device *dev;
1631 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001632 int err;
1633
1634 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001635 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001636 if (err)
1637 goto unlock;
1638
1639 for_each_net(net) {
1640 for_each_netdev(net, dev) {
1641 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001642 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1643 dev);
1644 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001645 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001646 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001647 }
1648 }
1649unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001650 rtnl_unlock();
1651 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001653EXPORT_SYMBOL(unregister_netdevice_notifier);
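/* Editor's illustrative sketch of the notifier API documented above: a
 * module that logs interface up/down transitions. The replay and synthesis
 * behaviour means no special casing is needed for devices that already
 * exist at load time or still exist at unload time.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		netdev_info(dev, "interface is up\n");
		break;
	case NETDEV_GOING_DOWN:
		netdev_info(dev, "interface is going down\n");
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

static int __init example_module_init(void)
{
	return register_netdevice_notifier(&example_netdev_nb);
}

static void __exit example_module_exit(void)
{
	unregister_netdevice_notifier(&example_netdev_nb);
}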
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654
1655/**
Jiri Pirko351638e2013-05-28 01:30:21 +00001656 * call_netdevice_notifiers_info - call all network notifier blocks
1657 * @val: value passed unmodified to notifier function
1658 * @dev: net_device pointer passed unmodified to notifier function
1659 * @info: notifier information data
1660 *
1661 * Call all network notifier blocks. Parameters and return value
1662 * are as for raw_notifier_call_chain().
1663 */
1664
stephen hemminger1d143d92013-12-29 14:01:29 -08001665static int call_netdevice_notifiers_info(unsigned long val,
1666 struct net_device *dev,
1667 struct netdev_notifier_info *info)
Jiri Pirko351638e2013-05-28 01:30:21 +00001668{
1669 ASSERT_RTNL();
1670 netdev_notifier_info_init(info, dev);
1671 return raw_notifier_call_chain(&netdev_chain, val, info);
1672}
Jiri Pirko351638e2013-05-28 01:30:21 +00001673
1674/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 * call_netdevice_notifiers - call all network notifier blocks
1676 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001677 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 *
1679 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001680 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 */
1682
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001683int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684{
Jiri Pirko351638e2013-05-28 01:30:21 +00001685 struct netdev_notifier_info info;
1686
1687 return call_netdevice_notifiers_info(val, dev, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001689EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690
Pablo Neira1cf519002015-05-13 18:19:37 +02001691#ifdef CONFIG_NET_INGRESS
Daniel Borkmann45771392015-04-10 23:07:54 +02001692static struct static_key ingress_needed __read_mostly;
1693
1694void net_inc_ingress_queue(void)
1695{
1696 static_key_slow_inc(&ingress_needed);
1697}
1698EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1699
1700void net_dec_ingress_queue(void)
1701{
1702 static_key_slow_dec(&ingress_needed);
1703}
1704EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1705#endif
1706
Daniel Borkmann1f211a12016-01-07 22:29:47 +01001707#ifdef CONFIG_NET_EGRESS
1708static struct static_key egress_needed __read_mostly;
1709
1710void net_inc_egress_queue(void)
1711{
1712 static_key_slow_inc(&egress_needed);
1713}
1714EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1715
1716void net_dec_egress_queue(void)
1717{
1718 static_key_slow_dec(&egress_needed);
1719}
1720EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1721#endif
1722
Ingo Molnarc5905af2012-02-24 08:31:31 +01001723static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001724#ifdef HAVE_JUMP_LABEL
Eric Dumazetb90e5792011-11-28 11:16:50 +00001725static atomic_t netstamp_needed_deferred;
Eric Dumazet13baa002017-03-01 14:28:39 -08001726static atomic_t netstamp_wanted;
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001727static void netstamp_clear(struct work_struct *work)
1728{
1729 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
Eric Dumazet13baa002017-03-01 14:28:39 -08001730 int wanted;
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001731
Eric Dumazet13baa002017-03-01 14:28:39 -08001732 wanted = atomic_add_return(deferred, &netstamp_wanted);
1733 if (wanted > 0)
1734 static_key_enable(&netstamp_needed);
1735 else
1736 static_key_disable(&netstamp_needed);
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001737}
1738static DECLARE_WORK(netstamp_work, netstamp_clear);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001739#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740
1741void net_enable_timestamp(void)
1742{
Eric Dumazet13baa002017-03-01 14:28:39 -08001743#ifdef HAVE_JUMP_LABEL
1744 int wanted;
1745
1746 while (1) {
1747 wanted = atomic_read(&netstamp_wanted);
1748 if (wanted <= 0)
1749 break;
1750 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
1751 return;
1752 }
1753 atomic_inc(&netstamp_needed_deferred);
1754 schedule_work(&netstamp_work);
1755#else
Ingo Molnarc5905af2012-02-24 08:31:31 +01001756 static_key_slow_inc(&netstamp_needed);
Eric Dumazet13baa002017-03-01 14:28:39 -08001757#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001759EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760
1761void net_disable_timestamp(void)
1762{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001763#ifdef HAVE_JUMP_LABEL
Eric Dumazet13baa002017-03-01 14:28:39 -08001764 int wanted;
1765
1766 while (1) {
1767 wanted = atomic_read(&netstamp_wanted);
1768 if (wanted <= 1)
1769 break;
1770 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
1771 return;
1772 }
1773 atomic_dec(&netstamp_needed_deferred);
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001774 schedule_work(&netstamp_work);
1775#else
Ingo Molnarc5905af2012-02-24 08:31:31 +01001776 static_key_slow_dec(&netstamp_needed);
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001777#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001779EXPORT_SYMBOL(net_disable_timestamp);
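/* Editor's illustrative sketch: the two functions above form a global
 * reference count behind the netstamp_needed static key, so every enable
 * must be paired with exactly one disable. A hypothetical feature needing
 * packet timestamps while active would bracket its lifetime like this:
 */
static void example_feature_start(void)
{
	net_enable_timestamp();		/* key flip may be deferred to a work item */
}

static void example_feature_stop(void)
{
	net_disable_timestamp();	/* pairs with the enable above */
}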
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780
Eric Dumazet3b098e22010-05-15 23:57:10 -07001781static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782{
Thomas Gleixner2456e852016-12-25 11:38:40 +01001783 skb->tstamp = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001784 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001785 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786}
1787
Eric Dumazet588f0332011-11-15 04:12:55 +00001788#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001789 if (static_key_false(&netstamp_needed)) { \
Thomas Gleixner2456e852016-12-25 11:38:40 +01001790 if ((COND) && !(SKB)->tstamp) \
Eric Dumazet588f0332011-11-15 04:12:55 +00001791 __net_timestamp(SKB); \
1792 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001793
Nikolay Aleksandrovf4b05d22016-04-28 17:59:28 +02001794bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001795{
1796 unsigned int len;
1797
1798 if (!(dev->flags & IFF_UP))
1799 return false;
1800
1801 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1802 if (skb->len <= len)
1803 return true;
1804
1805 /* if TSO is enabled, we don't care about the length as the packet
1806 * could be forwarded without being segmented beforehand.
1807 */
1808 if (skb_is_gso(skb))
1809 return true;
1810
1811 return false;
1812}
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04001813EXPORT_SYMBOL_GPL(is_skb_forwardable);
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001814
Herbert Xua0265d22014-04-17 13:45:03 +08001815int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1816{
Martin KaFai Lau4e3264d2016-11-09 15:36:33 -08001817 int ret = ____dev_forward_skb(dev, skb);
1818
1819 if (likely(!ret)) {
1820 skb->protocol = eth_type_trans(skb, dev);
1821 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
Herbert Xua0265d22014-04-17 13:45:03 +08001822 }
1823
Martin KaFai Lau4e3264d2016-11-09 15:36:33 -08001824 return ret;
Herbert Xua0265d22014-04-17 13:45:03 +08001825}
1826EXPORT_SYMBOL_GPL(__dev_forward_skb);
1827
Arnd Bergmann44540962009-11-26 06:07:08 +00001828/**
1829 * dev_forward_skb - loopback an skb to another netif
1830 *
1831 * @dev: destination network device
1832 * @skb: buffer to forward
1833 *
1834 * return values:
1835 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001836 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001837 *
1838 * dev_forward_skb can be used for injecting an skb from the
1839 * start_xmit function of one device into the receive queue
1840 * of another device.
1841 *
1842 * The receiving device may be in another namespace, so
1843 * we have to clear all information in the skb that could
1844 * impact namespace isolation.
1845 */
1846int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1847{
Herbert Xua0265d22014-04-17 13:45:03 +08001848 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001849}
1850EXPORT_SYMBOL_GPL(dev_forward_skb);
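/* Editor's illustrative sketch: the transmit routine of a veth-like pair
 * device, looping each skb into its peer's receive path with
 * dev_forward_skb() as documented above. The private structure is an
 * assumption for this sketch.
 */
struct example_pair_priv {
	struct net_device *peer;
};

static netdev_tx_t example_pair_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct example_pair_priv *priv = netdev_priv(dev);

	/* dev_forward_skb() scrubs namespace-sensitive state and consumes
	 * the skb whether it returns NET_RX_SUCCESS or NET_RX_DROP.
	 */
	if (dev_forward_skb(priv->peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}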
1851
Changli Gao71d9dec2010-12-15 19:57:25 +00001852static inline int deliver_skb(struct sk_buff *skb,
1853 struct packet_type *pt_prev,
1854 struct net_device *orig_dev)
1855{
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04001856 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001857 return -ENOMEM;
Reshetova, Elena63354792017-06-30 13:07:58 +03001858 refcount_inc(&skb->users);
Changli Gao71d9dec2010-12-15 19:57:25 +00001859 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1860}
1861
Salam Noureddine7866a622015-01-27 11:35:48 -08001862static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1863 struct packet_type **pt,
Jiri Pirkofbcb2172015-03-30 16:56:01 +02001864 struct net_device *orig_dev,
1865 __be16 type,
Salam Noureddine7866a622015-01-27 11:35:48 -08001866 struct list_head *ptype_list)
1867{
1868 struct packet_type *ptype, *pt_prev = *pt;
1869
1870 list_for_each_entry_rcu(ptype, ptype_list, list) {
1871 if (ptype->type != type)
1872 continue;
1873 if (pt_prev)
Jiri Pirkofbcb2172015-03-30 16:56:01 +02001874 deliver_skb(skb, pt_prev, orig_dev);
Salam Noureddine7866a622015-01-27 11:35:48 -08001875 pt_prev = ptype;
1876 }
1877 *pt = pt_prev;
1878}
1879
Eric Leblondc0de08d2012-08-16 22:02:58 +00001880static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1881{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001882 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001883 return false;
1884
1885 if (ptype->id_match)
1886 return ptype->id_match(ptype, skb->sk);
1887 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1888 return true;
1889
1890 return false;
1891}
1892
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893/*
1894 * Support routine. Sends outgoing frames to any network
1895 * taps currently in use.
1896 */
1897
David Ahern74b20582016-05-10 11:19:50 -07001898void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899{
1900 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001901 struct sk_buff *skb2 = NULL;
1902 struct packet_type *pt_prev = NULL;
Salam Noureddine7866a622015-01-27 11:35:48 -08001903 struct list_head *ptype_list = &ptype_all;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001904
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 rcu_read_lock();
Salam Noureddine7866a622015-01-27 11:35:48 -08001906again:
1907 list_for_each_entry_rcu(ptype, ptype_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 /* Never send packets back to the socket
1909 * they originated from - MvS (miquels@drinkel.ow.org)
1910 */
Salam Noureddine7866a622015-01-27 11:35:48 -08001911 if (skb_loop_sk(ptype, skb))
1912 continue;
Changli Gao71d9dec2010-12-15 19:57:25 +00001913
Salam Noureddine7866a622015-01-27 11:35:48 -08001914 if (pt_prev) {
1915 deliver_skb(skb2, pt_prev, skb->dev);
Changli Gao71d9dec2010-12-15 19:57:25 +00001916 pt_prev = ptype;
Salam Noureddine7866a622015-01-27 11:35:48 -08001917 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 }
Salam Noureddine7866a622015-01-27 11:35:48 -08001919
1920 /* need to clone skb, done only once */
1921 skb2 = skb_clone(skb, GFP_ATOMIC);
1922 if (!skb2)
1923 goto out_unlock;
1924
1925 net_timestamp_set(skb2);
1926
1927 /* skb->nh should be correctly
1928 * set by the sender, so that the check below is
1929 * just protection against buggy protocols.
1930 */
1931 skb_reset_mac_header(skb2);
1932
1933 if (skb_network_header(skb2) < skb2->data ||
1934 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1935 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1936 ntohs(skb2->protocol),
1937 dev->name);
1938 skb_reset_network_header(skb2);
1939 }
1940
1941 skb2->transport_header = skb2->network_header;
1942 skb2->pkt_type = PACKET_OUTGOING;
1943 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 }
Salam Noureddine7866a622015-01-27 11:35:48 -08001945
1946 if (ptype_list == &ptype_all) {
1947 ptype_list = &dev->ptype_all;
1948 goto again;
1949 }
1950out_unlock:
Willem de Bruijn581fe0e2017-09-22 19:42:37 -04001951 if (pt_prev) {
1952 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
1953 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1954 else
1955 kfree_skb(skb2);
1956 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 rcu_read_unlock();
1958}
David Ahern74b20582016-05-10 11:19:50 -07001959EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
Ben Hutchings2c530402012-07-10 10:55:09 +00001961/**
1962 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001963 * @dev: Network device
1964 * @txq: number of queues available
1965 *
1966 * If real_num_tx_queues is changed the tc mappings may no longer be
1967 * valid. To resolve this verify the tc mapping remains valid and if
1968 * not, NULL the mapping. With no priorities mapping to this
1969 * offset/count pair it will no longer be used. In the worst case, when
1970 * TC0 is invalid, nothing can be done, so disable priority mappings. It is
1971 * expected that drivers will fix this mapping if they can before
1972 * calling netif_set_real_num_tx_queues.
1973 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001974static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001975{
1976 int i;
1977 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1978
1979 /* If TC0 is invalidated disable TC mapping */
1980 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001981 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001982 dev->num_tc = 0;
1983 return;
1984 }
1985
1986 /* Invalidated prio to tc mappings set to TC0 */
1987 for (i = 1; i < TC_BITMASK + 1; i++) {
1988 int q = netdev_get_prio_tc_map(dev, i);
1989
1990 tc = &dev->tc_to_txq[q];
1991 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001992 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1993 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001994 netdev_set_prio_tc_map(dev, i, 0);
1995 }
1996 }
1997}
1998
Alexander Duyck8d059b02016-10-28 11:43:49 -04001999int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2000{
2001 if (dev->num_tc) {
2002 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2003 int i;
2004
2005 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2006 if ((txq - tc->offset) < tc->count)
2007 return i;
2008 }
2009
2010 return -1;
2011 }
2012
2013 return 0;
2014}
2015
Alexander Duyck537c00d2013-01-10 08:57:02 +00002016#ifdef CONFIG_XPS
2017static DEFINE_MUTEX(xps_map_mutex);
2018#define xmap_dereference(P) \
2019 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2020
Alexander Duyck6234f872016-10-28 11:46:49 -04002021static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2022 int tci, u16 index)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002023{
2024 struct xps_map *map = NULL;
2025 int pos;
2026
2027 if (dev_maps)
Alexander Duyck6234f872016-10-28 11:46:49 -04002028 map = xmap_dereference(dev_maps->cpu_map[tci]);
2029 if (!map)
2030 return false;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002031
Alexander Duyck6234f872016-10-28 11:46:49 -04002032 for (pos = map->len; pos--;) {
2033 if (map->queues[pos] != index)
2034 continue;
2035
2036 if (map->len > 1) {
2037 map->queues[pos] = map->queues[--map->len];
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002038 break;
2039 }
Alexander Duyck6234f872016-10-28 11:46:49 -04002040
2041 RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
2042 kfree_rcu(map, rcu);
2043 return false;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002044 }
2045
Alexander Duyck6234f872016-10-28 11:46:49 -04002046 return true;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002047}
2048
Alexander Duyck6234f872016-10-28 11:46:49 -04002049static bool remove_xps_queue_cpu(struct net_device *dev,
2050 struct xps_dev_maps *dev_maps,
2051 int cpu, u16 offset, u16 count)
2052{
Alexander Duyck184c4492016-10-28 11:50:13 -04002053 int num_tc = dev->num_tc ? : 1;
2054 bool active = false;
2055 int tci;
Alexander Duyck6234f872016-10-28 11:46:49 -04002056
Alexander Duyck184c4492016-10-28 11:50:13 -04002057 for (tci = cpu * num_tc; num_tc--; tci++) {
2058 int i, j;
2059
2060 for (i = count, j = offset; i--; j++) {
2061 if (!remove_xps_queue(dev_maps, tci, j))
2062 break;
2063 }
2064
2065 active |= i < 0;
Alexander Duyck6234f872016-10-28 11:46:49 -04002066 }
2067
Alexander Duyck184c4492016-10-28 11:50:13 -04002068 return active;
Alexander Duyck6234f872016-10-28 11:46:49 -04002069}
2070
2071static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2072 u16 count)
Alexander Duyck537c00d2013-01-10 08:57:02 +00002073{
2074 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00002075 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002076 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002077
2078 mutex_lock(&xps_map_mutex);
2079 dev_maps = xmap_dereference(dev->xps_maps);
2080
2081 if (!dev_maps)
2082 goto out_no_maps;
2083
Alexander Duyck6234f872016-10-28 11:46:49 -04002084 for_each_possible_cpu(cpu)
2085 active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
2086 offset, count);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002087
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002088 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00002089 RCU_INIT_POINTER(dev->xps_maps, NULL);
2090 kfree_rcu(dev_maps, rcu);
2091 }
2092
Alexander Duyck6234f872016-10-28 11:46:49 -04002093 for (i = offset + (count - 1); count--; i--)
Alexander Duyck024e9672013-01-10 08:57:46 +00002094 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
2095 NUMA_NO_NODE);
2096
Alexander Duyck537c00d2013-01-10 08:57:02 +00002097out_no_maps:
2098 mutex_unlock(&xps_map_mutex);
2099}
2100
Alexander Duyck6234f872016-10-28 11:46:49 -04002101static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2102{
2103 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2104}
2105
Alexander Duyck01c5f862013-01-10 08:57:35 +00002106static struct xps_map *expand_xps_map(struct xps_map *map,
2107 int cpu, u16 index)
2108{
2109 struct xps_map *new_map;
2110 int alloc_len = XPS_MIN_MAP_ALLOC;
2111 int i, pos;
2112
2113 for (pos = 0; map && pos < map->len; pos++) {
2114 if (map->queues[pos] != index)
2115 continue;
2116 return map;
2117 }
2118
2119 /* Need to add queue to this CPU's existing map */
2120 if (map) {
2121 if (pos < map->alloc_len)
2122 return map;
2123
2124 alloc_len = map->alloc_len * 2;
2125 }
2126
2127 /* Need to allocate a new map to store the queue on this CPU */
2128 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2129 cpu_to_node(cpu));
2130 if (!new_map)
2131 return NULL;
2132
2133 for (i = 0; i < pos; i++)
2134 new_map->queues[i] = map->queues[i];
2135 new_map->alloc_len = alloc_len;
2136 new_map->len = pos;
2137
2138 return new_map;
2139}
2140
Michael S. Tsirkin35735402013-10-02 09:14:06 +03002141int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2142 u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00002143{
Alexander Duyck01c5f862013-01-10 08:57:35 +00002144 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck184c4492016-10-28 11:50:13 -04002145 int i, cpu, tci, numa_node_id = -2;
2146 int maps_sz, num_tc = 1, tc = 0;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002147 struct xps_map *map, *new_map;
Alexander Duyck01c5f862013-01-10 08:57:35 +00002148 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002149
Alexander Duyck184c4492016-10-28 11:50:13 -04002150 if (dev->num_tc) {
2151 num_tc = dev->num_tc;
2152 tc = netdev_txq_to_tc(dev, index);
2153 if (tc < 0)
2154 return -EINVAL;
2155 }
2156
2157 maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
2158 if (maps_sz < L1_CACHE_BYTES)
2159 maps_sz = L1_CACHE_BYTES;
2160
Alexander Duyck537c00d2013-01-10 08:57:02 +00002161 mutex_lock(&xps_map_mutex);
2162
2163 dev_maps = xmap_dereference(dev->xps_maps);
2164
Alexander Duyck01c5f862013-01-10 08:57:35 +00002165 /* allocate memory for queue storage */
Alexander Duyck184c4492016-10-28 11:50:13 -04002166 for_each_cpu_and(cpu, cpu_online_mask, mask) {
Alexander Duyck01c5f862013-01-10 08:57:35 +00002167 if (!new_dev_maps)
2168 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002169 if (!new_dev_maps) {
2170 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002171 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002172 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002173
Alexander Duyck184c4492016-10-28 11:50:13 -04002174 tci = cpu * num_tc + tc;
2175 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
Alexander Duyck01c5f862013-01-10 08:57:35 +00002176 NULL;
2177
2178 map = expand_xps_map(map, cpu, index);
2179 if (!map)
2180 goto error;
2181
Alexander Duyck184c4492016-10-28 11:50:13 -04002182 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002183 }
2184
2185 if (!new_dev_maps)
2186 goto out_no_new_maps;
2187
2188 for_each_possible_cpu(cpu) {
Alexander Duyck184c4492016-10-28 11:50:13 -04002189 /* copy maps belonging to foreign traffic classes */
2190 for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
2191 /* fill in the new device map from the old device map */
2192 map = xmap_dereference(dev_maps->cpu_map[tci]);
2193 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2194 }
2195
2196 /* We need to explicitly update tci as the previous loop
2197 * could break out early if dev_maps is NULL.
2198 */
2199 tci = cpu * num_tc + tc;
2200
Alexander Duyck01c5f862013-01-10 08:57:35 +00002201 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2202 /* add queue to CPU maps */
2203 int pos = 0;
2204
Alexander Duyck184c4492016-10-28 11:50:13 -04002205 map = xmap_dereference(new_dev_maps->cpu_map[tci]);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002206 while ((pos < map->len) && (map->queues[pos] != index))
2207 pos++;
2208
2209 if (pos == map->len)
2210 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002211#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00002212 if (numa_node_id == -2)
2213 numa_node_id = cpu_to_node(cpu);
2214 else if (numa_node_id != cpu_to_node(cpu))
2215 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002216#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00002217 } else if (dev_maps) {
2218 /* fill in the new device map from the old device map */
Alexander Duyck184c4492016-10-28 11:50:13 -04002219 map = xmap_dereference(dev_maps->cpu_map[tci]);
2220 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002221 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002222
Alexander Duyck184c4492016-10-28 11:50:13 -04002223 /* copy maps belonging to foreign traffic classes */
2224 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2225 /* fill in the new device map from the old device map */
2226 map = xmap_dereference(dev_maps->cpu_map[tci]);
2227 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2228 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002229 }
2230
Alexander Duyck01c5f862013-01-10 08:57:35 +00002231 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2232
Alexander Duyck537c00d2013-01-10 08:57:02 +00002233 /* Cleanup old maps */
Alexander Duyck184c4492016-10-28 11:50:13 -04002234 if (!dev_maps)
2235 goto out_no_old_maps;
2236
2237 for_each_possible_cpu(cpu) {
2238 for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
2239 new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2240 map = xmap_dereference(dev_maps->cpu_map[tci]);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002241 if (map && map != new_map)
2242 kfree_rcu(map, rcu);
2243 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002244 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002245
Alexander Duyck184c4492016-10-28 11:50:13 -04002246 kfree_rcu(dev_maps, rcu);
2247
2248out_no_old_maps:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002249 dev_maps = new_dev_maps;
2250 active = true;
2251
2252out_no_new_maps:
2253 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00002254 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2255 (numa_node_id >= 0) ? numa_node_id :
2256 NUMA_NO_NODE);
2257
Alexander Duyck01c5f862013-01-10 08:57:35 +00002258 if (!dev_maps)
2259 goto out_no_maps;
2260
2261 /* removes queue from unused CPUs */
2262 for_each_possible_cpu(cpu) {
Alexander Duyck184c4492016-10-28 11:50:13 -04002263 for (i = tc, tci = cpu * num_tc; i--; tci++)
2264 active |= remove_xps_queue(dev_maps, tci, index);
2265 if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
2266 active |= remove_xps_queue(dev_maps, tci, index);
2267 for (i = num_tc - tc, tci++; --i; tci++)
2268 active |= remove_xps_queue(dev_maps, tci, index);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002269 }
2270
2271 /* free map if not active */
2272 if (!active) {
2273 RCU_INIT_POINTER(dev->xps_maps, NULL);
2274 kfree_rcu(dev_maps, rcu);
2275 }
2276
2277out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00002278 mutex_unlock(&xps_map_mutex);
2279
2280 return 0;
2281error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002282 /* remove any maps that we added */
2283 for_each_possible_cpu(cpu) {
Alexander Duyck184c4492016-10-28 11:50:13 -04002284 for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
2285 new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2286 map = dev_maps ?
2287 xmap_dereference(dev_maps->cpu_map[tci]) :
2288 NULL;
2289 if (new_map && new_map != map)
2290 kfree(new_map);
2291 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002292 }
2293
Alexander Duyck537c00d2013-01-10 08:57:02 +00002294 mutex_unlock(&xps_map_mutex);
2295
Alexander Duyck537c00d2013-01-10 08:57:02 +00002296 kfree(new_dev_maps);
2297 return -ENOMEM;
2298}
2299EXPORT_SYMBOL(netif_set_xps_queue);
2300
2301#endif
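/* Editor's illustrative sketch of netif_set_xps_queue() above: give each
 * TX queue of a multiqueue device a single dedicated CPU so transmissions
 * from that CPU prefer "its" queue. Assumes the driver sized
 * real_num_tx_queues with at least one queue per online CPU.
 */
static void example_spread_xps(struct net_device *dev)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu >= dev->real_num_tx_queues)
			break;
		if (netif_set_xps_queue(dev, cpumask_of(cpu), cpu))
			netdev_warn(dev, "XPS setup failed for queue %d\n",
				    cpu);
	}
}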
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002302void netdev_reset_tc(struct net_device *dev)
2303{
Alexander Duyck6234f872016-10-28 11:46:49 -04002304#ifdef CONFIG_XPS
2305 netif_reset_xps_queues_gt(dev, 0);
2306#endif
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002307 dev->num_tc = 0;
2308 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2309 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2310}
2311EXPORT_SYMBOL(netdev_reset_tc);
2312
2313int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2314{
2315 if (tc >= dev->num_tc)
2316 return -EINVAL;
2317
Alexander Duyck6234f872016-10-28 11:46:49 -04002318#ifdef CONFIG_XPS
2319 netif_reset_xps_queues(dev, offset, count);
2320#endif
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002321 dev->tc_to_txq[tc].count = count;
2322 dev->tc_to_txq[tc].offset = offset;
2323 return 0;
2324}
2325EXPORT_SYMBOL(netdev_set_tc_queue);
2326
2327int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2328{
2329 if (num_tc > TC_MAX_QUEUE)
2330 return -EINVAL;
2331
Alexander Duyck6234f872016-10-28 11:46:49 -04002332#ifdef CONFIG_XPS
2333 netif_reset_xps_queues_gt(dev, 0);
2334#endif
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002335 dev->num_tc = num_tc;
2336 return 0;
2337}
2338EXPORT_SYMBOL(netdev_set_num_tc);
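/* Editor's illustrative sketch of the traffic-class helpers above: split a
 * hypothetical 8-queue device into two TCs, queues 0-3 for TC0 and queues
 * 4-7 for TC1, as an mqprio-style offload might request.
 */
static int example_setup_two_tcs(struct net_device *dev)
{
	int err;

	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;

	err = netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0: 4 queues at offset 0 */
	if (!err)
		err = netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1: 4 queues at offset 4 */
	if (err)
		netdev_reset_tc(dev);
	return err;
}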
2339
John Fastabendf0796d52010-07-01 13:21:57 +00002340/*
2341 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2342 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2343 */
Tom Herberte6484932010-10-18 18:04:39 +00002344int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002345{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002346 int rc;
2347
Tom Herberte6484932010-10-18 18:04:39 +00002348 if (txq < 1 || txq > dev->num_tx_queues)
2349 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002350
Ben Hutchings5c565802011-02-15 19:39:21 +00002351 if (dev->reg_state == NETREG_REGISTERED ||
2352 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002353 ASSERT_RTNL();
2354
Tom Herbert1d24eb42010-11-21 13:17:27 +00002355 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2356 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002357 if (rc)
2358 return rc;
2359
John Fastabend4f57c082011-01-17 08:06:04 +00002360 if (dev->num_tc)
2361 netif_setup_tc(dev, txq);
2362
Alexander Duyck024e9672013-01-10 08:57:46 +00002363 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002364 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002365#ifdef CONFIG_XPS
2366 netif_reset_xps_queues_gt(dev, txq);
2367#endif
2368 }
John Fastabendf0796d52010-07-01 13:21:57 +00002369 }
Tom Herberte6484932010-10-18 18:04:39 +00002370
2371 dev->real_num_tx_queues = txq;
2372 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002373}
2374EXPORT_SYMBOL(netif_set_real_num_tx_queues);
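/* Editor's illustrative sketch: resizing the active queue counts at
 * runtime, as an ethtool set_channels() handler might. Per the comments
 * above, shrinking the TX side flushes stale skbs from the affected qdiscs
 * and fixes up any tc/XPS state that referenced the removed queues.
 */
static int example_set_channels(struct net_device *dev,
				unsigned int txq, unsigned int rxq)
{
	int err;

	ASSERT_RTNL();
	err = netif_set_real_num_tx_queues(dev, txq);
	if (err)
		return err;
	return netif_set_real_num_rx_queues(dev, rxq);
}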
Denis Vlasenko56079432006-03-29 15:57:29 -08002375
Michael Daltona953be52014-01-16 22:23:28 -08002376#ifdef CONFIG_SYSFS
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002377/**
2378 * netif_set_real_num_rx_queues - set actual number of RX queues used
2379 * @dev: Network device
2380 * @rxq: Actual number of RX queues
2381 *
2382 * This must be called either with the rtnl_lock held or before
2383 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002384 * negative error code. If called before registration, it always
2385 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002386 */
2387int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2388{
2389 int rc;
2390
Tom Herbertbd25fa72010-10-18 18:00:16 +00002391 if (rxq < 1 || rxq > dev->num_rx_queues)
2392 return -EINVAL;
2393
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002394 if (dev->reg_state == NETREG_REGISTERED) {
2395 ASSERT_RTNL();
2396
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002397 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2398 rxq);
2399 if (rc)
2400 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002401 }
2402
2403 dev->real_num_rx_queues = rxq;
2404 return 0;
2405}
2406EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2407#endif
2408
Ben Hutchings2c530402012-07-10 10:55:09 +00002409/**
2410 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002411 *
2412 * This routine should set an upper limit on the number of RSS queues
2413 * used by default by multiqueue devices.
2414 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002415int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002416{
Hariprasad Shenai40e4e712016-06-08 18:09:08 +05302417 return is_kdump_kernel() ?
2418 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
Yuval Mintz16917b82012-07-01 03:18:50 +00002419}
2420EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2421
Eric Dumazet3bcb8462016-06-04 20:02:28 -07002422static void __netif_reschedule(struct Qdisc *q)
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002423{
2424 struct softnet_data *sd;
2425 unsigned long flags;
2426
2427 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05002428 sd = this_cpu_ptr(&softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002429 q->next_sched = NULL;
2430 *sd->output_queue_tailp = q;
2431 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002432 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2433 local_irq_restore(flags);
2434}
2435
David S. Miller37437bb2008-07-16 02:15:04 -07002436void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002437{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002438 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2439 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002440}
2441EXPORT_SYMBOL(__netif_schedule);
2442
Eric Dumazete6247022013-12-05 04:45:08 -08002443struct dev_kfree_skb_cb {
2444 enum skb_free_reason reason;
2445};
2446
2447static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002448{
Eric Dumazete6247022013-12-05 04:45:08 -08002449 return (struct dev_kfree_skb_cb *)skb->cb;
Denis Vlasenko56079432006-03-29 15:57:29 -08002450}
Denis Vlasenko56079432006-03-29 15:57:29 -08002451
John Fastabend46e5da42014-09-12 20:04:52 -07002452void netif_schedule_queue(struct netdev_queue *txq)
2453{
2454 rcu_read_lock();
2455 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2456 struct Qdisc *q = rcu_dereference(txq->qdisc);
2457
2458 __netif_schedule(q);
2459 }
2460 rcu_read_unlock();
2461}
2462EXPORT_SYMBOL(netif_schedule_queue);
2463
John Fastabend46e5da42014-09-12 20:04:52 -07002464void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2465{
2466 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2467 struct Qdisc *q;
2468
2469 rcu_read_lock();
2470 q = rcu_dereference(dev_queue->qdisc);
2471 __netif_schedule(q);
2472 rcu_read_unlock();
2473 }
2474}
2475EXPORT_SYMBOL(netif_tx_wake_queue);

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
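
/*
 * Example (illustrative sketch, not part of this file): code that may
 * run in either hardirq or process context should free skbs through the
 * _any wrappers rather than picking dev_kfree_skb() or
 * dev_kfree_skb_irq() by hand:
 *
 *	dev_consume_skb_any(skb);	// frame was transmitted fine
 *	dev_kfree_skb_any(skb);		// frame dropped on an error path
 */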


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from the system and therefore no longer
 * available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
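
/*
 * Example (illustrative sketch, not part of this file): these helpers
 * are usually paired in a driver's power-management hooks; my_suspend()
 * and my_resume() are hypothetical callbacks:
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		netif_device_detach(netdev);
 *		// ... quiesce the hardware ...
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		// ... reinitialize the hardware ...
 *		netif_device_attach(netdev);
 *		return 0;
 *	}
 */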

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
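
/*
 * Worked example (illustrative, under assumed values): with qcount = 8
 * and skb_get_hash() returning 0x9e3779b9, reciprocal_scale() computes
 * (0x9e3779b9 * 8) >> 32 = 4, so the packet maps to queue qoffset + 4.
 * The multiply-shift spreads hashes uniformly over [0, qcount) without
 * the cost of a modulo.
 */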

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate the hardware checksum when a packet is to be mangled, and
 * complete the checksum manually on the outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity: the checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
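
/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * hardware cannot checksum a particular CHECKSUM_PARTIAL frame falls
 * back to this helper in its ndo_start_xmit; my_hw_can_csum() is a
 * hypothetical capability test:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !my_hw_can_csum(skb) && skb_checksum_help(skb))
 *		goto drop_frame;
 */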

int skb_crc32c_csum_help(struct sk_buff *skb)
{
	__le32 crc32c_csum;
	int ret = 0, offset, start;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	if (unlikely(skb_is_gso(skb)))
		goto out;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity: the checksum could be wrong.
	 */
	if (unlikely(skb_has_shared_frag(skb))) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}
	start = skb_checksum_start_offset(skb);
	offset = start + offsetof(struct sctphdr, checksum);
	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
		ret = -EINVAL;
		goto out;
	}
	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__le32))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}
	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
						  skb->len - start, ~(__u32)0,
						  crc32c_csum_stub));
	*(__le32 *)(skb->data + offset) = crc32c_csum;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;
out:
	return ret;
}

__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);


/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;

	return skb->ip_summed == CHECKSUM_NONE;
}

/**
 * __skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @tx_path: whether it is called in TX path
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation. This is
 * only possible when GSO is used for verifying header integrity.
 *
 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (unlikely(skb_needs_check(skb, tx_path)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
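
/*
 * Example (illustrative sketch, not part of this file): callers normally
 * use the skb_gso_segment() wrapper and then walk the returned list of
 * segments; my_xmit_one() is a hypothetical per-segment transmit:
 *
 *	struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *	if (IS_ERR(segs))
 *		goto drop;
 *	consume_skb(skb);
 *	while (segs) {
 *		struct sk_buff *next = segs->next;
 *
 *		segs->next = NULL;
 *		my_xmit_one(segs);
 *		segs = next;
 *	}
 */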

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. The IOMMU is present and allows mapping of all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));

			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
	if (illegal_highdma(skb->dev, skb))
		features &= ~NETIF_F_SG;

	return features;
}

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);

static netdev_features_t dflt_features_check(const struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}

static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs)
		return features & ~NETIF_F_GSO_MASK;

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
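
/*
 * Example (illustrative sketch, not part of this file): forwarding and
 * tunnel code queries the per-skb feature set before deciding whether a
 * frame must be segmented in software, mirroring what
 * validate_xmit_skb() does below:
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features))
 *		segs = skb_gso_segment(skb, features);
 */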

static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}

struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}

static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}

int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{
	if (unlikely(skb->csum_not_inet))
		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
			skb_crc32c_csum_help(skb);

	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);

static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		if (validate_xmit_xfrm(skb, features))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (skb_csum_hwoffload_help(skb, features))
				goto out_kfree_skb;
		}
	}

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	atomic_long_inc(&dev->tx_dropped);
	return NULL;
}

struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
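
/*
 * Example (illustrative sketch, not part of this file): a bypass path
 * that hands packets directly to the driver, pktgen-style, validates
 * the list first so the GSO, VLAN and checksum invariants still hold:
 *
 *	skb = validate_xmit_skb_list(skb, dev);
 *	if (skb) {
 *		HARD_TX_LOCK(dev, txq, smp_processor_id());
 *		if (!netif_xmit_frozen_or_drv_stopped(txq))
 *			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 *		HARD_TX_UNLOCK(dev, txq);
 *	}
 */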

static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
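
/*
 * Worked example (illustrative, under assumed values): a TSO skb with
 * gso_size = 1448, gso_segs = 10 and hdr_len = 66 (14 MAC + 20 IP +
 * 32 TCP) has skb->len = 66 + 10 * 1448 = 14546, so pkt_len becomes
 * 14546 + 9 * 66 = 15140, exactly the 10 * 1514 bytes that will hit
 * the wire once the frame is segmented.
 */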

static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		__qdisc_drop(skb, &to_free);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(to_free))
		kfree_skb_list(to_free);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx =
			sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_SYMBOL(xmit_recursion);

/**
 * dev_loopback_xmit - loop back @skb
 * @net: network namespace this loopback is happening in
 * @sk: sk needed to be a netfilter okfn
 * @skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);

#ifdef CONFIG_NET_EGRESS
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
	struct tcf_result cl_res;

	if (!cl)
		return skb;

	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
	qdisc_bstats_cpu_update(cl->q, skb);

	switch (tcf_classify(skb, cl, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		qdisc_qstats_cpu_drop(cl->q);
		*ret = NET_XMIT_DROP;
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*ret = NET_XMIT_SUCCESS;
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	default:
		break;
	}

	return skb;
}
#endif /* CONFIG_NET_EGRESS */

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		unsigned int tci = skb->sender_cpu - 1;

		if (dev->num_tc) {
			tci *= dev->num_tc;
			tci += netdev_get_prio_tc_map(dev, skb->priority);
		}

		map = rcu_dereference(dev_maps->cpu_map[tci]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
									   map->len)];
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	u32 sender_cpu = skb->sender_cpu - 1;

	if (sender_cpu >= (u32)NR_CPUS)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

/**
 * __dev_queue_xmit - transmit a buffer
 * @skb: buffer to transmit
 * @accel_priv: private data used for L2 forwarding offload
 *
 * Queue a buffer for transmission to a network device. The caller must
 * have set the device and priority and built the buffer before calling
 * this function. The function can be called from an interrupt.
 *
 * A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 * I notice this method can also return errors from the queue disciplines,
 * including NET_XMIT_DROP, which is a positive value. So, errors can also
 * be positive.
 *
 * Regardless of the return value, the skb is consumed, so it is currently
 * difficult to retry a send to this method. (You can bump the ref count
 * before sending to hold a reference for retry if you are careful.)
 *
 * When calling this method, interrupts MUST be enabled. This is because
 * the BH enable code must have IRQs enabled so that it will not deadlock.
 * --BLG
 */
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_at_ingress = 0;
# ifdef CONFIG_NET_EGRESS
	if (static_key_false(&egress_needed)) {
		skb = sch_handle_egress(skb, &rc, dev);
		if (!skb)
			goto out;
	}
# endif
#endif
	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's hot in this CPU cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	txq = netdev_pick_tx(dev, skb, accel_priv);
	q = rcu_dereference_bh(txq->qdisc);

	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...

	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.

	 * Check this and shoot the lock. It is not prone to deadlocks.
	 * Either shoot the noqueue qdisc; it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {
			if (unlikely(__this_cpu_read(xmit_recursion) >
				     XMIT_RECURSION_LIMIT))
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev);
			if (!skb)
				goto out;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
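
/*
 * Example (illustrative sketch, not part of this file): a module
 * transmitting a hand-built frame sets the device before queueing;
 * ETH_P_MY_PROTO and my_stats are hypothetical names. Note the skb is
 * consumed whether or not the call succeeds:
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_MY_PROTO);
 *	if (dev_queue_xmit(skb))
 *		my_stats.tx_errors++;
 */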


/*************************************************************************
 *			Receiver routines
 *************************************************************************/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
unsigned int __read_mostly netdev_budget_usecs = 2000;
int weight_p __read_mostly = 64;           /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);

struct static_key rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key rfs_needed __read_mostly;
EXPORT_SYMBOL(rfs_needed);

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	const struct rps_sock_flow_table *sock_flow_table;
	struct netdev_rx_queue *rxqueue = dev->_rx;
	struct rps_dev_flow_table *flow_table;
	struct rps_map *map;
	int cpu = -1;
	u32 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue += index;
	}

	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	map = rcu_dereference(rxqueue->rps_map);
	if (!flow_table && !map)
		goto done;

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		struct rps_dev_flow *rflow;
		u32 next_cpu;
		u32 ident;

		/* First check into global flow table if there is a match */
		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
		if ((ident ^ hash) & ~rps_cpu_mask)
			goto try_rps;

		next_cpu = ident & rps_cpu_mask;

		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
			    rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

try_rps:

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
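
/*
 * Example (illustrative sketch, not part of this file): a driver
 * implementing ndo_rx_flow_steer typically sweeps its filter table from
 * a periodic worker; "priv" and its fields are hypothetical:
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct my_filter *f = &priv->filters[i];
 *
 *		if (f->installed &&
 *		    rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, f->filter_id))
 *			my_hw_remove_filter(priv, f);
 *	}
 */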

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure belongs to another CPU.
 * If yes, queue it to our IPI list and return 1.
 * If no, return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}
3773
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (!netif_running(skb->dev))
		goto drop;
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device.
		 * We can use a non-atomic operation since we own the queue lock.
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

drop:
	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

static u32 netif_receive_generic_xdp(struct sk_buff *skb,
				     struct bpf_prog *xdp_prog)
{
	u32 metalen, act = XDP_DROP;
	struct xdp_buff xdp;
	void *orig_data;
	int hlen, off;
	u32 mac_len;

	/* Reinjected packets coming from act_mirred or similar should
	 * not get XDP generic processing.
	 */
	if (skb_cloned(skb))
		return XDP_PASS;

	/* XDP packets must be linear and must have sufficient headroom
	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
	 * native XDP provides, thus we need to do it here as well.
	 */
	if (skb_is_nonlinear(skb) ||
	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
		int troom = skb->tail + skb->data_len - skb->end;

		/* In case we have to go down the path and also linearize,
		 * then let's do the pskb_expand_head() work just once here.
		 */
		if (pskb_expand_head(skb,
				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
			goto do_drop;
		if (troom > 0 && __skb_linearize(skb))
			goto do_drop;
	}

	/* The XDP program wants to see the packet starting at the MAC
	 * header.
	 */
	mac_len = skb->data - skb_mac_header(skb);
	hlen = skb_headlen(skb) + mac_len;
	xdp.data = skb->data - mac_len;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + hlen;
	xdp.data_hard_start = skb->data - skb_headroom(skb);
	orig_data = xdp.data;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	off = xdp.data - orig_data;
	if (off > 0)
		__skb_pull(skb, off);
	else if (off < 0)
		__skb_push(skb, -off);
	skb->mac_header += off;

	switch (act) {
	case XDP_REDIRECT:
	case XDP_TX:
		__skb_push(skb, mac_len);
		break;
	case XDP_PASS:
		metalen = xdp.data - xdp.data_meta;
		if (metalen)
			skb_metadata_set(skb, metalen);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(skb->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
	do_drop:
		kfree_skb(skb);
		break;
	}

	return act;
}

/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior.
 */
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	bool free_skb = true;
	int cpu, rc;

	txq = netdev_pick_tx(dev, skb, NULL);
	cpu = smp_processor_id();
	HARD_TX_LOCK(dev, txq, cpu);
	if (!netif_xmit_stopped(txq)) {
		rc = netdev_start_xmit(skb, dev, txq, 0);
		if (dev_xmit_complete(rc))
			free_skb = false;
	}
	HARD_TX_UNLOCK(dev, txq);
	if (free_skb) {
		trace_xdp_exception(dev, xdp_prog, XDP_TX);
		kfree_skb(skb);
	}
}
EXPORT_SYMBOL_GPL(generic_xdp_tx);

static struct static_key generic_xdp_needed __read_mostly;

int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
{
	if (xdp_prog) {
		u32 act = netif_receive_generic_xdp(skb, xdp_prog);
		int err;

		if (act != XDP_PASS) {
			switch (act) {
			case XDP_REDIRECT:
				err = xdp_do_generic_redirect(skb->dev, skb,
							      xdp_prog);
				if (err)
					goto out_redir;
				/* fallthru to submit skb */
			case XDP_TX:
				generic_xdp_tx(skb, xdp_prog);
				break;
			}
			return XDP_DROP;
		}
	}
	return XDP_PASS;
out_redir:
	kfree_skb(skb);
	return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);

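/* A hedged sketch of how a driver lacking native XDP might run the
 * generic XDP hook on its receive path, mirroring the calling
 * convention used by netif_rx_internal() below (RCU read lock plus
 * disabled preemption around the program run). The foo_* name is
 * hypothetical; everything else is the API above.
 *
 *	static bool foo_run_generic_xdp(struct sk_buff *skb)
 *	{
 *		u32 act;
 *
 *		preempt_disable();
 *		rcu_read_lock();
 *		act = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
 *		rcu_read_unlock();
 *		preempt_enable();
 *
 *		return act == XDP_PASS;	(false means skb was consumed)
 *	}
 */
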
static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);

	if (static_key_false(&generic_xdp_needed)) {
		int ret;

		preempt_disable();
		rcu_read_lock();
		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
		rcu_read_unlock();
		preempt_enable();

		/* Consider XDP consuming the packet a success from
		 * the netdev point of view; we do not want to count
		 * this as an error.
		 */
		if (ret != XDP_PASS)
			return NET_RX_SUCCESS;
	}

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;

		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}

/**
 * netif_rx - post buffer to the network code
 * @skb: buffer to post
 *
 * This function receives a packet from a device driver and queues it for
 * the upper (protocol) levels to process. It always succeeds. The buffer
 * may be dropped during processing for congestion control or by the
 * protocol layers.
 *
 * Return values:
 * NET_RX_SUCCESS (no congestion)
 * NET_RX_DROP (packet was dropped)
 */

int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);

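/* An illustrative sketch, not taken from any driver: the classic
 * non-NAPI pattern where an interrupt handler hands a freshly built
 * skb to netif_rx(). The foo_* names are hypothetical; netif_rx() and
 * eth_type_trans() are the real API.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb = foo_read_packet(dev);
 *
 *		if (!skb)
 *			return IRQ_NONE;
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *		return IRQ_HANDLED;
 *	}
 */
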
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static __latent_entropy void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;

			clist = clist->next;

			WARN_ON(refcount_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
				trace_consume_skb(skb);
			else
				trace_kfree_skb(skb, net_tx_action);

			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
				__kfree_skb(skb);
			else
				__kfree_skb_defer(skb);
		}

		__kfree_skb_flush();
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			spin_lock(root_lock);
			/* We need to make sure head->next_sched is read
			 * before clearing __QDISC_STATE_SCHED
			 */
			smp_mb__before_atomic();
			clear_bit(__QDISC_STATE_SCHED, &q->state);
			qdisc_run(q);
			spin_unlock(root_lock);
		}
	}
}

#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

static inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
	struct tcf_result cl_res;

	/* If there's at least one ingress present somewhere (so
	 * we get here via enabled static key), remaining devices
	 * that are not configured with an ingress qdisc will bail
	 * out here.
	 */
	if (!cl)
		return skb;
	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	skb->tc_at_ingress = 1;
	qdisc_bstats_cpu_update(cl->q, skb);

	switch (tcf_classify(skb, cl, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		qdisc_qstats_cpu_drop(cl->q);
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by cls/act_bpf, so
		 * we can safely push the L2 header back before
		 * redirecting to another netdev
		 */
		__skb_push(skb, skb->mac_len);
		skb_do_redirect(skb);
		return NULL;
	default:
		break;
	}
#endif /* CONFIG_NET_CLS_ACT */
	return skb;
}

/**
 * netdev_is_rx_handler_busy - check if receive handler is registered
 * @dev: device to check
 *
 * Check if a receive handler is already registered for a given device.
 * Return true if there is one.
 *
 * The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);

/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device. This handler will then be
 * called from __netif_receive_skb. A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 *
 * For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);

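/* A minimal sketch of registering a receive handler, in the style of
 * bridge/macvlan/team (the in-tree users of this API). The foo_* names
 * are hypothetical. The handler runs in __netif_receive_skb_core()
 * under rcu_read_lock(), so rx_handler_data is fetched with
 * rcu_dereference().
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct foo_port *port;
 *
 *		port = rcu_dereference((*pskb)->dev->rx_handler_data);
 *		foo_count_rx(port, *pskb);
 *		return RX_HANDLER_PASS;	(let the stack keep processing)
 *	}
 *
 *	ASSERT_RTNL();
 *	err = netdev_rx_handler_register(port->dev, foo_handle_frame, port);
 */
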
/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{
#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_hook_ingress_active(skb)) {
		int ingress_retval;

		if (*pt_prev) {
			*ret = deliver_skb(skb, *pt_prev, orig_dev);
			*pt_prev = NULL;
		}

		rcu_read_lock();
		ingress_retval = nf_hook_ingress(skb);
		rcu_read_unlock();
		return ingress_retval;
	}
#endif /* CONFIG_NETFILTER_INGRESS */
	return 0;
}

static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	if (skb_skip_tc_classify(skb))
		goto skip_classify;

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_INGRESS
	if (static_key_false(&ingress_needed)) {
		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
		if (!skb)
			goto out;

		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
			goto out;
	}
#endif
	skb_reset_tc(skb);
skip_classify:
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		if (skb_vlan_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		if (!deliver_exact)
			atomic_long_inc(&skb->dev->rx_dropped);
		else
			atomic_long_inc(&skb->dev->rx_nohandler);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	return ret;
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned int noreclaim_flag;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		noreclaim_flag = memalloc_noreclaim_save();
		ret = __netif_receive_skb_core(skb, true);
		memalloc_noreclaim_restore(noreclaim_flag);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}

static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
	struct bpf_prog *new = xdp->prog;
	int ret = 0;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rcu_assign_pointer(dev->xdp_prog, new);
		if (old)
			bpf_prog_put(old);

		if (old && !new) {
			static_key_slow_dec(&generic_xdp_needed);
		} else if (new && !old) {
			static_key_slow_inc(&generic_xdp_needed);
			dev_disable_lro(dev);
		}
		break;

	case XDP_QUERY_PROG:
		xdp->prog_attached = !!old;
		xdp->prog_id = old ? old->aux->id : 0;
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int netif_receive_skb_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	if (static_key_false(&generic_xdp_needed)) {
		int ret;

		preempt_disable();
		rcu_read_lock();
		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
		rcu_read_unlock();
		preempt_enable();

		if (ret != XDP_PASS)
			return NET_RX_DROP;
	}

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}

/**
 * netif_receive_skb - process receive buffer from network
 * @skb: buffer to process
 *
 * netif_receive_skb() is the main receive data processing function.
 * It always succeeds. The buffer may be dropped during processing
 * for congestion control or by the protocol layers.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);

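/* An illustrative sketch (hypothetical foo_* names) of the usual call
 * site: a NAPI poll routine that delivers each received frame with
 * netif_receive_skb() from softirq context, as the kernel-doc above
 * requires.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_rx_ring *ring =
 *			container_of(napi, struct foo_rx_ring, napi);
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = foo_next_rx_skb(ring))) {
 *			skb->protocol = eth_type_trans(skb, ring->netdev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */
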
DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
	struct sk_buff *skb, *tmp;
	struct softnet_data *sd;

	local_bh_disable();
	sd = this_cpu_ptr(&softnet_data);

	local_irq_disable();
	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);
	local_irq_enable();

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	local_bh_enable();
}

static void flush_all_backlogs(void)
{
	unsigned int cpu;

	get_online_cpus();

	for_each_online_cpu(cpu)
		queue_work_on(cpu, system_highpri_wq,
			      per_cpu_ptr(&flush_works, cpu));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&flush_works, cpu));

	put_online_cpus();
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}

/* napi->gro_list contains packets ordered by age, with the
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= skb_metadata_dst_cmp(p, skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	pinfo->frags[0].page_offset += grow;
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
		NAPI_GRO_CB(skb)->recursion_counter = 0;
		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->is_atomic = 1;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {
			pp = &nskb->next;
			nskb = *pp;
		}
		*pp = NULL;
		nskb->next = NULL;
		napi_gro_complete(nskb);
	} else {
		napi->gro_count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

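/* A hedged sketch of the other side of the contract serviced by the
 * two lookup helpers above: a protocol registering the gro_receive /
 * gro_complete callbacks they find. dev_add_offload() is the real
 * registration API; the foo_* names and ETH_P_FOO are hypothetical.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.callbacks = {
 *			.gro_receive  = foo_gro_receive,
 *			.gro_complete = foo_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&foo_offload);
 */
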
static void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
	kmem_cache_free(skbuff_head_cache, skb);
}

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);

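/* A hedged sketch of the common driver pattern: feeding completed
 * receive descriptors into GRO from the NAPI poll loop instead of
 * calling netif_receive_skb() directly. The foo_* names and ring
 * layout are hypothetical.
 *
 *	while (work < budget && (skb = foo_build_skb(ring))) {
 *		skb->protocol = eth_type_trans(skb, ring->netdev);
 *		napi_gro_receive(&ring->napi, skb);
 *		work++;
 *	}
 */
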
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;
	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
	secpath_reset(skb);

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

Eric Dumazeta50e2332014-03-29 21:28:21 -07004991static gro_result_t napi_frags_finish(struct napi_struct *napi,
4992 struct sk_buff *skb,
4993 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004994{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004995 switch (ret) {
4996 case GRO_NORMAL:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004997 case GRO_HELD:
4998 __skb_push(skb, ETH_HLEN);
4999 skb->protocol = eth_type_trans(skb, skb->dev);
5000 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07005001 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00005002 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005003
5004 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005005 napi_reuse_skb(napi, skb);
5006 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00005007
Michal Kubečeke44699d2017-06-29 11:13:36 +02005008 case GRO_MERGED_FREE:
5009 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5010 napi_skb_free_stolen_head(skb);
5011 else
5012 napi_reuse_skb(napi, skb);
5013 break;
5014
Ben Hutchings5b252f02009-10-29 07:17:09 +00005015 case GRO_MERGED:
Steffen Klassert25393d32017-02-15 09:39:44 +01005016 case GRO_CONSUMED:
Ben Hutchings5b252f02009-10-29 07:17:09 +00005017 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005018 }
5019
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07005020 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005021}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005022
/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so we
 * copy the ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	eth = skb_gro_header_fast(skb, 0);
	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish().
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
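
/*
 * Illustrative sketch (an assumption, not code used by this file): how a
 * driver that receives into page fragments typically pairs the two
 * helpers above.  "page", "offset", "len" and "truesize" stand for the
 * driver's own RX buffer bookkeeping.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;				// allocation failed, retry later
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += truesize;
 *	napi_gro_frags(napi);			// consumes napi->skb
 *
 * napi_frags_skb() above then pulls the Ethernet header out of frag0,
 * so the driver never has to copy headers into the linear area itself.
 */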

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
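
/*
 * Folding sketch: csum_fold() adds the two 16-bit halves of the 32-bit
 * one's-complement sum (with end-around carry) and then inverts the
 * result.  For example, wsum 0x12345678 folds as 0x1234 + 0x5678 =
 * 0x68ac, and ~0x68ac = 0x9753.  A packet whose complete sum (pseudo
 * header included) is 0xffff therefore folds to 0, which is why a zero
 * return above means "checksum valid".
 */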

static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
	while (remsd) {
		struct softnet_data *next = remsd->rps_ipi_next;

		if (cpu_online(remsd->cpu))
			smp_call_function_single_async(remsd->cpu, &remsd->csd);
		remsd = next;
	}
#endif
}

/*
 * net_rps_action_and_irq_enable sends any pending IPIs for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPIs to kick RPS processing on remote cpus. */
		net_rps_send_ipi(remsd);
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
	bool again = true;
	int work = 0;

	/* Check if we have pending IPIs; it's better to send them now
	 * than to wait until net_rx_action() ends.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = dev_rx_weight;
	while (again) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			rcu_read_lock();
			__netif_receive_skb(skb);
			rcu_read_unlock();
			input_queue_head_incr(sd);
			if (++work >= quota)
				return work;
		}

		local_irq_disable();
		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			again = false;
		} else {
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
		}
		rps_unlock(sd);
		local_irq_enable();
	}

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if the NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure that only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets the STATE_MISSED bit if STATE_SCHED was already set.
		 * This was suggested by Alexander Duyck, as the compiler
		 * emits better code than:
		 * if (val & NAPIF_STATE_SCHED)
		 *	new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
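
/*
 * Arithmetic sketch of the branchless trick above, assuming the usual
 * bit layout (NAPIF_STATE_SCHED == 0x1, NAPIF_STATE_MISSED == 0x2):
 * (val & 0x1) / 0x1 evaluates to 0 or 1, so multiplying by 0x2 yields
 * either 0 or NAPIF_STATE_MISSED without a conditional branch.
 */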

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
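
/*
 * Illustrative sketch of the usual hard-irq handler pattern that pairs
 * napi_schedule_prep() with __napi_schedule_irqoff(); the "mydrv_*"
 * names and priv layout are hypothetical:
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *data)
 *	{
 *		struct mydrv_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			mydrv_mask_rx_irq(priv);	// stop further IRQs
 *			__napi_schedule_irqoff(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 * Hard irqs are already masked inside the handler, so the _irqoff
 * variant skips a redundant local_irq_save()/local_irq_restore() pair.
 */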
5258
Eric Dumazet364b6052016-11-15 10:15:13 -08005259bool napi_complete_done(struct napi_struct *n, int work_done)
Herbert Xud565b0a2008-12-15 23:38:52 -08005260{
Eric Dumazet39e6c822017-02-28 10:34:50 -08005261 unsigned long flags, val, new;
Herbert Xud565b0a2008-12-15 23:38:52 -08005262
5263 /*
Eric Dumazet217f6972016-11-15 10:15:11 -08005264 * 1) Don't let napi dequeue from the cpu poll list
5265 * just in case its running on a different cpu.
5266 * 2) If we are busy polling, do nothing here, we have
5267 * the guarantee we will be called later.
Herbert Xud565b0a2008-12-15 23:38:52 -08005268 */
Eric Dumazet217f6972016-11-15 10:15:11 -08005269 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
5270 NAPIF_STATE_IN_BUSY_POLL)))
Eric Dumazet364b6052016-11-15 10:15:13 -08005271 return false;
Herbert Xud565b0a2008-12-15 23:38:52 -08005272
Eric Dumazet3b47d302014-11-06 21:09:44 -08005273 if (n->gro_list) {
5274 unsigned long timeout = 0;
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005275
Eric Dumazet3b47d302014-11-06 21:09:44 -08005276 if (work_done)
5277 timeout = n->dev->gro_flush_timeout;
5278
5279 if (timeout)
5280 hrtimer_start(&n->timer, ns_to_ktime(timeout),
5281 HRTIMER_MODE_REL_PINNED);
5282 else
5283 napi_gro_flush(n, false);
5284 }
Eric Dumazet02c16022017-02-04 15:25:02 -08005285 if (unlikely(!list_empty(&n->poll_list))) {
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005286 /* If n->poll_list is not empty, we need to mask irqs */
5287 local_irq_save(flags);
Eric Dumazet02c16022017-02-04 15:25:02 -08005288 list_del_init(&n->poll_list);
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005289 local_irq_restore(flags);
5290 }
Eric Dumazet39e6c822017-02-28 10:34:50 -08005291
5292 do {
5293 val = READ_ONCE(n->state);
5294
5295 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
5296
5297 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
5298
5299 /* If STATE_MISSED was set, leave STATE_SCHED set,
5300 * because we will call napi->poll() one more time.
5301 * This C code was suggested by Alexander Duyck to help gcc.
5302 */
5303 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
5304 NAPIF_STATE_SCHED;
5305 } while (cmpxchg(&n->state, val, new) != val);
5306
5307 if (unlikely(val & NAPIF_STATE_MISSED)) {
5308 __napi_schedule(n);
5309 return false;
5310 }
5311
Eric Dumazet364b6052016-11-15 10:15:13 -08005312 return true;
Herbert Xud565b0a2008-12-15 23:38:52 -08005313}
Eric Dumazet3b47d302014-11-06 21:09:44 -08005314EXPORT_SYMBOL(napi_complete_done);
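
/*
 * Illustrative sketch of the driver-side contract (the "mydrv_*"
 * helpers are hypothetical, not APIs defined here):
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = mydrv_clean_rx_ring(napi, budget);
 *
 *		if (work_done < budget &&
 *		    napi_complete_done(napi, work_done))
 *			mydrv_unmask_rx_irq(napi);	// re-arm device IRQ
 *		return work_done;
 *	}
 *
 * Returning budget keeps the instance on the poll list, and a false
 * return from napi_complete_done() means polling will continue (busy
 * poll or STATE_MISSED), so the device IRQ must stay masked.
 */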

/* must be called under rcu_read_lock(), as we don't take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}

#if defined(CONFIG_NET_RX_BUSY_POLL)

#define BUSY_POLL_BUDGET 8

static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
{
	int rc;

	/* Busy polling means there is a high chance the device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit() operations.
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	local_bh_disable();

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, BUSY_POLL_BUDGET);
	trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == BUSY_POLL_BUDGET)
		__napi_schedule(napi);
	local_bh_enable();
}

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg)
{
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

restart:
	napi_poll = NULL;

	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (!napi)
		goto out;

	preempt_disable();
	for (;;) {
		int work = 0;

		local_bh_disable();
		if (!napi_poll) {
			unsigned long val = READ_ONCE(napi->state);

			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
				   NAPIF_STATE_IN_BUSY_POLL))
				goto count;
			if (cmpxchg(&napi->state, val,
				    val | NAPIF_STATE_IN_BUSY_POLL |
					  NAPIF_STATE_SCHED) != val)
				goto count;
			have_poll_lock = netpoll_poll_lock(napi);
			napi_poll = napi->poll;
		}
		work = napi_poll(napi, BUSY_POLL_BUDGET);
		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
count:
		if (work > 0)
			__NET_ADD_STATS(dev_net(napi->dev),
					LINUX_MIB_BUSYPOLLRXPACKETS, work);
		local_bh_enable();

		if (!loop_end || loop_end(loop_end_arg, start_time))
			break;

		if (unlikely(need_resched())) {
			if (napi_poll)
				busy_poll_stop(napi, have_poll_lock);
			preempt_enable();
			rcu_read_unlock();
			cond_resched();
			if (loop_end(loop_end_arg, start_time))
				return;
			goto restart;
		}
		cpu_relax();
	}
	if (napi_poll)
		busy_poll_stop(napi, have_poll_lock);
	preempt_enable();
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);
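
/*
 * Illustrative sketch of a loop_end callback, in the spirit of the
 * socket code's sk_busy_loop_end() (the deadline handling here is a
 * hypothetical example, not an API of this file):
 *
 *	static bool my_loop_end(void *arg, unsigned long start_time)
 *	{
 *		unsigned long *deadline = arg;
 *
 *		return time_after(busy_loop_current_time(), *deadline);
 *	}
 *
 * Returning true ends the loop.  Passing a NULL loop_end makes
 * napi_busy_loop() poll exactly once and return.
 */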

#endif /* CONFIG_NET_RX_BUSY_POLL */

static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}

/* Warning: the caller is responsible for ensuring that an RCU grace
 * period elapses before the memory containing @napi is freed.
 */
bool napi_hash_del(struct napi_struct *napi)
{
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	}
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
}
EXPORT_SYMBOL_GPL(napi_hash_del);

static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note: we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (napi->gro_list && !napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
		__napi_schedule_irqoff(napi);

	return HRTIMER_NORESTART;
}

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
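
/*
 * Illustrative registration sketch, typically done at probe time
 * ("mydrv_poll" is the hypothetical poll callback shown earlier):
 *
 *	netif_napi_add(netdev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);
 *	...
 *	napi_enable(&priv->napi);	// usually from ndo_open()
 *
 * The instance is created with NAPI_STATE_SCHED set, so it cannot be
 * scheduled until napi_enable() clears that bit.
 */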

void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	might_sleep();
	if (napi_hash_del(napi))
		synchronize_net();
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);
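
/*
 * Illustrative teardown ordering (a sketch based on the two helpers
 * above): quiesce first, then unregister, then free the device.
 *
 *	napi_disable(&priv->napi);	// from ndo_stop(), may sleep
 *	netif_napi_del(&priv->napi);	// process context, may sleep
 *
 * netif_napi_del() only pays the synchronize_net() cost when the
 * instance was actually hashed, and it also frees any half-built
 * GRO state.
 */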

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi(). Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call. Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight. In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* Flush too-old packets.
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}

static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(netdev_budget_usecs);
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				goto out;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, since that allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
out:
	__kfree_skb_flush();
}
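
/*
 * Tuning note: both limits consumed above are runtime sysctls,
 * net.core.netdev_budget (packet budget per softirq round) and
 * net.core.netdev_budget_usecs (time limit).  When either one trips,
 * sd->time_squeeze is incremented, which is visible as the third
 * column of /proc/net/softnet_stat.
 */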

struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
{
	struct net_device *dev = data;

	return upper_dev == dev;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks only the immediate upper device,
 * not the complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					     upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks the entire upper device chain.
 * The caller must hold the RCU read lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					       upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold the RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);

static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *udev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.upper,
	     udev = netdev_next_upper_dev_rcu(dev, &iter);
	     udev;
	     udev = netdev_next_upper_dev_rcu(dev, &iter)) {
		/* first is the upper device itself */
		ret = fn(udev, data);
		if (ret)
			return ret;

		/* then look at all of its upper devices */
		ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
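
/*
 * Illustrative walker sketch ("count_upper" is hypothetical): counting
 * every device stacked above @dev.
 *
 *	static int count_upper(struct net_device *upper, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;		// non-zero would abort the walk
 *	}
 *
 *	int n = 0;
 *	netdev_walk_all_upper_dev_rcu(dev, count_upper, &n);
 *
 * Note the recursion simply follows each adjacency list, so an upper
 * device reachable through two lower paths is visited once per path.
 */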

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold the RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold the RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);

static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					void *data),
			      void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev_rcu(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold the RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}
static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr += 1;
		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);

		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that the master link is always the first item in the list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info)
{
	struct netdev_notifier_changeupper_info changeupper_info;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not an upper device to upper_dev. */
	if (netdev_has_upper_dev(upper_dev, dev))
		return -EBUSY;

	if (netdev_has_upper_dev(dev, upper_dev))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = master;
	changeupper_info.linking = true;
	changeupper_info.upper_info = upper_info;

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback;

	return 0;

rollback:
	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to a device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

6319/**
6320 * netdev_master_upper_dev_link - Add a master link to the upper device
6321 * @dev: device
6322 * @upper_dev: new upper device
Jiri Pirko6dffb042015-12-03 12:12:10 +01006323 * @upper_priv: upper device private
Jiri Pirko29bf24a2015-12-03 12:12:11 +01006324 * @upper_info: upper info to be passed down via notifier
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006325 *
6326 * Adds a link to device which is upper to this one. In this case, only
6327 * one master upper device can be linked, although other non-master devices
6328 * might be linked as well. The caller must hold the RTNL lock.
6329 * On a failure a negative errno code is returned. On success the reference
6330 * counts are adjusted and the function returns zero.
6331 */
6332int netdev_master_upper_dev_link(struct net_device *dev,
Jiri Pirko6dffb042015-12-03 12:12:10 +01006333 struct net_device *upper_dev,
Jiri Pirko29bf24a2015-12-03 12:12:11 +01006334 void *upper_priv, void *upper_info)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006335{
Jiri Pirko29bf24a2015-12-03 12:12:11 +01006336 return __netdev_upper_dev_link(dev, upper_dev, true,
6337 upper_priv, upper_info);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006338}
6339EXPORT_SYMBOL(netdev_master_upper_dev_link);
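
/* Usage sketch (editorial illustration, not part of the original file):
 * a bond/team-style master enslaving a port makes this call under RTNL.
 * The "example_*" names are hypothetical.
 *
 *	static int example_enslave(struct net_device *master_dev,
 *				   struct net_device *port_dev,
 *				   void *port_priv)
 *	{
 *		ASSERT_RTNL();
 *		return netdev_master_upper_dev_link(port_dev, master_dev,
 *						    port_priv, NULL);
 *	}
 *
 * The matching teardown is netdev_upper_dev_unlink(port_dev, master_dev).
 */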

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to a device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info;

	ASSERT_RTNL();

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
	changeupper_info.linking = false;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
				      &changeupper_info.info);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info;

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);
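
/* Usage sketch (editorial illustration): the bonding driver is the
 * in-tree caller; it fills a struct netdev_bonding_info for a slave and
 * dispatches it so listeners (e.g. RDMA/ipoib) can react. The counter
 * shown is hypothetical.
 *
 *	struct netdev_bonding_info binfo;
 *
 *	binfo.master.bond_mode = BOND_MODE_ACTIVEBACKUP;
 *	binfo.master.num_slaves = num_slaves;	// hypothetical counter
 *	binfo.slave.state = BOND_STATE_BACKUP;
 *	netdev_bonding_info_change(slave_dev, &binfo);
 */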

static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

int dev_get_nest_level(struct net_device *dev)
{
	struct net_device *lower = NULL;
	struct list_head *iter;
	int max_nest = -1;
	int nest;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower);
		if (max_nest < nest)
			max_nest = nest;
	}

	return max_nest + 1;
}
EXPORT_SYMBOL(dev_get_nest_level);

/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{
	struct netdev_notifier_changelowerstate_info changelowerstate_info;

	ASSERT_RTNL();
	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
				      &changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);
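
/* Usage sketch (editorial illustration): a LAG master (bonding/team)
 * reports per-slave state this way so switchdev drivers below it can
 * sync hardware LAG state. The struct is netdev_lag_lower_state_info
 * from <linux/netdevice.h>; the calling context is hypothetical.
 *
 *	struct netdev_lag_lower_state_info info = {
 *		.link_up    = true,
 *		.tx_enabled = true,
 *	};
 *
 *	ASSERT_RTNL();
 *	netdev_lower_state_changed(slave_dev, &info);
 */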

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, leave promiscuity untouched and
		 * return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
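
/* Usage sketch (editorial illustration): packet-capture style code takes
 * a promiscuous reference while open and drops it symmetrically on close;
 * the count makes concurrent users compose safely.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	// grab a reference
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);		// release it on teardown
 *	rtnl_unlock();
 */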

static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, leave allmulti untouched and
		 * return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts to normal
 * filtering operation. A negative @inc value is used to drop the counter
 * when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
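
/* Usage sketch (editorial illustration): multicast-routing style code
 * bumps allmulti while a virtual interface is active:
 *
 *	ASSERT_RTNL();
 *	err = dev_set_allmulti(dev, 1);		// start receiving all mcast
 *	if (err)
 *		return err;
 *	...
 *	dev_set_allmulti(dev, -1);		// paired release
 */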

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags & IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast address changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
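
/* Usage sketch (editorial illustration): IFF_RUNNING, IFF_LOWER_UP and
 * IFF_DORMANT are synthesized here from operstate/carrier rather than
 * stored in dev->flags, so callers should read them via this helper:
 *
 *	unsigned int flags = dev_get_flags(dev);
 *	bool oper_up = (flags & (IFF_UP | IFF_RUNNING)) ==
 *		       (IFF_UP | IFF_RUNNING);
 */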

int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 * Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 * Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface? We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {
		if (old_flags & IFF_UP)
			__dev_close(dev);
		else
			ret = __dev_open(dev);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, without asking us and without reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = changes;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
	}
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 *
 * Change settings on a device based on state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
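
/* Usage sketch (editorial illustration): bringing an interface up from
 * kernel code the way the ioctl/rtnetlink paths do it, a read-modify-write
 * of the userspace-visible flags under RTNL:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */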

int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(__dev_set_mtu);

/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
				    dev->name, new_mtu, dev->min_mtu);
		return -EINVAL;
	}

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
				    dev->name, new_mtu, dev->max_mtu);
		return -EINVAL;
	}

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		}
	}
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
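
/* Usage sketch (editorial illustration): a tunnel-style driver shrinking
 * its own MTU to leave room for encapsulation; the range check against
 * dev->min_mtu/dev->max_mtu and the NETDEV_PRECHANGEMTU veto both happen
 * inside dev_set_mtu(). The overhead constant is hypothetical.
 *
 *	#define EXAMPLE_TUNNEL_OVERHEAD 50	// hypothetical
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(tunnel_dev,
 *			  lower_dev->mtu - EXAMPLE_TUNNEL_OVERHEAD);
 *	rtnl_unlock();
 */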

/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
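
/* Usage sketch (editorial illustration): note that sa_family must carry
 * the device's ARPHRD type (dev->type), not an address family:
 *
 *	struct sockaddr sa;
 *	static const u8 new_mac[ETH_ALEN] =
 *		{ 0x02, 0x00, 0x00, 0x11, 0x22, 0x33 };	// locally administered
 *
 *	sa.sa_family = dev->type;		// e.g. ARPHRD_ETHER
 *	memcpy(sa.sa_data, new_mac, ETH_ALEN);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */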

/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 * dev_get_phys_port_name - Get device physical port name
 * @dev: device
 * @name: port name
 * @len: limit of bytes to copy to name
 *
 * Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_name)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_name(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);

/**
 * dev_change_proto_down - update protocol port state information
 * @dev: device
 * @proto_down: new value
 *
 * This info can be used by switch drivers to set the phys state of the
 * port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);

u8 __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id)
{
	struct netdev_xdp xdp;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_QUERY_PROG;

	/* Query must always succeed. */
	WARN_ON(xdp_op(dev, &xdp) < 0);
	if (prog_id)
		*prog_id = xdp.prog_id;

	return xdp.prog_attached;
}

static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
			   struct netlink_ext_ack *extack, u32 flags,
			   struct bpf_prog *prog)
{
	struct netdev_xdp xdp;

	memset(&xdp, 0, sizeof(xdp));
	if (flags & XDP_FLAGS_HW_MODE)
		xdp.command = XDP_SETUP_PROG_HW;
	else
		xdp.command = XDP_SETUP_PROG;
	xdp.extack = extack;
	xdp.flags = flags;
	xdp.prog = prog;

	return xdp_op(dev, &xdp);
}

/**
 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
 * @dev: device
 * @extack: netlink extended ack
 * @fd: new program fd or negative value to clear
 * @flags: xdp-related flags
 *
 * Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, u32 flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct bpf_prog *prog = NULL;
	xdp_op_t xdp_op, xdp_chk;
	int err;

	ASSERT_RTNL();

	xdp_op = xdp_chk = ops->ndo_xdp;
	if (!xdp_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
		return -EOPNOTSUPP;
	if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
		xdp_op = generic_xdp_install;
	if (xdp_op == xdp_chk)
		xdp_chk = generic_xdp_install;

	if (fd >= 0) {
		if (xdp_chk && __dev_xdp_attached(dev, xdp_chk, NULL))
			return -EEXIST;
		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
		    __dev_xdp_attached(dev, xdp_op, NULL))
			return -EBUSY;

		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	err = dev_xdp_install(dev, xdp_op, extack, flags, prog);
	if (err < 0 && prog)
		bpf_prog_put(prog);

	return err;
}
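
/* Usage sketch (editorial illustration): the rtnetlink IFLA_XDP path is
 * the in-tree caller; conceptually it does, under RTNL:
 *
 *	err = dev_change_xdp_fd(dev, extack, prog_fd,
 *				XDP_FLAGS_DRV_MODE |
 *				XDP_FLAGS_UPDATE_IF_NOEXIST);
 *
 * and detaches any attached program again with a negative fd:
 *
 *	err = dev_change_xdp_fd(dev, extack, -1, 0);
 */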

/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}

static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		/* Notify protocols that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL);

		/*
		 * Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST have detached us from all upper and
		 * lower devices by now.
		 */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}

static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(&upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}

static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(&upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
		}
	}
}

static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
		features &= ~NETIF_F_TSO_MANGLEID;

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* GSO partial features require GSO partial be set */
	if ((features & dev->gso_partial_features) &&
	    !(features & NETIF_F_GSO_PARTIAL)) {
		netdev_dbg(dev,
			   "Dropping partially supported GSO features since no GSO partial.\n");
		features &= ~dev->gso_partial_features;
	}

	return features;
}

int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err) {
		netdev_features_t diff = features ^ dev->features;

		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
				dev->features = features;
				udp_tunnel_get_rx_info(dev);
			} else {
				udp_tunnel_drop_rx_info(dev);
			}
		}

		dev->features = features;
	}

	return err < 0 ? 0 : 1;
}

/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
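
/* Usage sketch (editorial illustration): a driver calls this after some
 * constraint consulted by its ndo_fix_features()/netdev_fix_features()
 * has changed, so the core can recompute dev->features and send
 * NETDEV_FEAT_CHANGE. The priv field is hypothetical.
 *
 *	rtnl_lock();
 *	priv->rx_csum_disabled = true;	// hypothetical driver state
 *	netdev_update_features(dev);	// recompute and notify
 *	rtnl_unlock();
 */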

/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
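
/* Usage sketch (editorial illustration): VLAN/macvlan-style drivers call
 * this from their NETDEV_CHANGE/NETDEV_UP notifier handling so the
 * virtual device mirrors the carrier and dormant state of its real
 * (lower) device:
 *
 *	netif_stacked_transfer_operstate(real_dev, vlan_dev);
 */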

#ifdef CONFIG_SYSFS
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;
	size_t sz = count * sizeof(*rx);

	BUG_ON(count < 1);

	rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++)
		rx[i].dev = dev;
	return 0;
}
#endif

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	if (count < 1 || count > 0xffff)
		return -EINVAL;

	tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!tx)
		return -ENOMEM;

	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netif_tx_stop_queue(txq);
	}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);
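
/* Usage sketch (editorial illustration): drivers typically stop all TX
 * queues before tearing down rings so the stack stops handing them
 * packets, then restart after reconfiguration. The "example_*" helpers
 * are hypothetical.
 *
 *	netif_tx_stop_all_queues(dev);
 *	example_free_tx_rings(dev);		// hypothetical
 *	example_alloc_tx_rings(dev);		// hypothetical
 *	netif_tx_start_all_queues(dev);
 */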
7520
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007521/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007522 * register_netdevice - register a network device
7523 * @dev: device to register
7524 *
7525 * Take a completed network device structure and add it to the kernel
7526 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7527 * chain. 0 is returned on success. A negative errno code is returned
7528 * on a failure to set up the device, or if the name is a duplicate.
7529 *
7530 * Callers must hold the rtnl semaphore. You may want
7531 * register_netdev() instead of this.
7532 *
7533 * BUGS:
7534 * The locking appears insufficient to guarantee two parallel registers
7535 * will not get the same name.
7536 */
7537
7538int register_netdevice(struct net_device *dev)
7539{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007540 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007541 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007542
7543 BUG_ON(dev_boot_phase);
7544 ASSERT_RTNL();
7545
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007546 might_sleep();
7547
Linus Torvalds1da177e2005-04-16 15:20:36 -07007548 /* When net_devices are persistent, this will be fatal. */
7549 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007550 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007551
David S. Millerf1f28aa2008-07-15 00:08:33 -07007552 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07007553 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007554
Gao feng828de4f2012-09-13 20:58:27 +00007555 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00007556 if (ret < 0)
7557 goto out;
7558
Linus Torvalds1da177e2005-04-16 15:20:36 -07007559 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007560 if (dev->netdev_ops->ndo_init) {
7561 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007562 if (ret) {
7563 if (ret > 0)
7564 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08007565 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007566 }
7567 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007568
Patrick McHardyf6469682013-04-19 02:04:27 +00007569 if (((dev->hw_features | dev->features) &
7570 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00007571 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
7572 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
7573 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
7574 ret = -EINVAL;
7575 goto err_uninit;
7576 }
7577
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00007578 ret = -EBUSY;
7579 if (!dev->ifindex)
7580 dev->ifindex = dev_new_index(net);
7581 else if (__dev_get_by_index(net, dev->ifindex))
7582 goto err_uninit;
7583
Michał Mirosław5455c692011-02-15 16:59:17 +00007584 /* Transfer changeable features to wanted_features and enable
7585 * software offloads (GSO and GRO).
7586 */
7587 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00007588 dev->features |= NETIF_F_SOFT_FEATURES;
Sabrina Dubrocad764a122017-07-21 12:49:28 +02007589
7590 if (dev->netdev_ops->ndo_udp_tunnel_add) {
7591 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
7592 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
7593 }
7594
Michał Mirosław14d12322011-02-22 16:52:28 +00007595 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007596
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007597 if (!(dev->flags & IFF_LOOPBACK))
Michał Mirosław34324dc2011-11-15 15:29:55 +00007598 dev->hw_features |= NETIF_F_NOCACHE_COPY;
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007599
Alexander Duyck7f348a62016-04-20 16:51:00 -04007600 /* If IPv4 TCP segmentation offload is supported we should also
7601 * allow the device to enable segmenting the frame with the option
7602 * of ignoring a static IP ID value. This doesn't enable the
7603 * feature itself but allows the user to enable it later.
7604 */
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007605 if (dev->hw_features & NETIF_F_TSO)
7606 dev->hw_features |= NETIF_F_TSO_MANGLEID;
Alexander Duyck7f348a62016-04-20 16:51:00 -04007607 if (dev->vlan_features & NETIF_F_TSO)
7608 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
7609 if (dev->mpls_features & NETIF_F_TSO)
7610 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
7611 if (dev->hw_enc_features & NETIF_F_TSO)
7612 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07007613
Michał Mirosław1180e7d2011-07-14 14:41:11 -07007614 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00007615 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07007616 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00007617
Pravin B Shelaree579672013-03-07 09:28:08 +00007618 /* Make NETIF_F_SG inheritable to tunnel devices.
7619 */
Alexander Duyck802ab552016-04-10 21:45:03 -04007620 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
Pravin B Shelaree579672013-03-07 09:28:08 +00007621
Simon Horman0d89d202013-05-23 21:02:52 +00007622 /* Make NETIF_F_SG inheritable to MPLS.
7623 */
7624 dev->mpls_features |= NETIF_F_SG;
7625
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00007626 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
7627 ret = notifier_to_errno(ret);
7628 if (ret)
7629 goto err_uninit;
7630
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007631 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007632 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007633 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007634 dev->reg_state = NETREG_REGISTERED;
7635
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007636 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00007637
Linus Torvalds1da177e2005-04-16 15:20:36 -07007638 /*
7639 * Default initial state at registry is that the
7640 * device is present.
7641 */
7642
7643 set_bit(__LINK_STATE_PRESENT, &dev->state);
7644
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01007645 linkwatch_init_dev(dev);
7646
Linus Torvalds1da177e2005-04-16 15:20:36 -07007647 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007648 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007649 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04007650 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007651
Jiri Pirko948b3372013-01-08 01:38:25 +00007652 /* If the device has permanent device address, driver should
7653 * set dev_addr and also addr_assign_type should be set to
7654 * NET_ADDR_PERM (default value).
7655 */
7656 if (dev->addr_assign_type == NET_ADDR_PERM)
7657 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7658
Linus Torvalds1da177e2005-04-16 15:20:36 -07007659 /* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007660 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07007661 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007662 if (ret) {
7663 rollback_registered(dev);
7664 dev->reg_state = NETREG_UNREGISTERED;
7665 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007666 /*
7667 * Prevent userspace races by waiting until the network
7668 * device is fully setup before sending notifications.
7669 */
Patrick McHardya2835762010-02-26 06:34:51 +00007670 if (!dev->rtnl_link_ops ||
7671 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007672 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007673
7674out:
7675 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007676
7677err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007678 if (dev->netdev_ops->ndo_uninit)
7679 dev->netdev_ops->ndo_uninit(dev);
David S. Millercf124db2017-05-08 12:52:56 -04007680 if (dev->priv_destructor)
7681 dev->priv_destructor(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007682 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007683}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007684EXPORT_SYMBOL(register_netdevice);
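/*
 * Illustrative sketch, not part of dev.c: register_netdevice() is the
 * variant for callers that already hold RTNL (for instance rtnl_link_ops
 * link-creation paths). The foo_ name is hypothetical.
 */
static int foo_register_locked(struct net_device *dev)
{
	ASSERT_RTNL();			/* caller did rtnl_lock() */
	return register_netdevice(dev);	/* 0 or negative errno */
}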
Linus Torvalds1da177e2005-04-16 15:20:36 -07007685
7686/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007687 * init_dummy_netdev - init a dummy network device for NAPI
7688 * @dev: device to init
7689 *
7690 * This takes a network device structure and initializes the minimum
7691 * number of fields so it can be used to schedule NAPI polls without
7692 * registering a full-blown interface. This is to be used by drivers
7693 * that need to tie several hardware interfaces to a single NAPI
7694 * poll scheduler due to HW limitations.
7695 */
7696int init_dummy_netdev(struct net_device *dev)
7697{
7698 /* Clear everything. Note we don't initialize spinlocks
7699 * as they aren't supposed to be taken by any of the
7700 * NAPI code and this dummy netdev is supposed to be
7701 * only ever used for NAPI polls.
7702 */
7703 memset(dev, 0, sizeof(struct net_device));
7704
7705 /* make sure we BUG if trying to hit standard
7706 * register/unregister code path
7707 */
7708 dev->reg_state = NETREG_DUMMY;
7709
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007710 /* NAPI wants this */
7711 INIT_LIST_HEAD(&dev->napi_list);
7712
7713 /* a dummy interface is started by default */
7714 set_bit(__LINK_STATE_PRESENT, &dev->state);
7715 set_bit(__LINK_STATE_START, &dev->state);
7716
Eric Dumazet29b44332010-10-11 10:22:12 +00007717 /* Note: We don't allocate pcpu_refcnt for dummy devices,
7718 * because users of this 'device' don't need to change
7719 * its refcount.
7720 */
7721
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007722 return 0;
7723}
7724EXPORT_SYMBOL_GPL(init_dummy_netdev);
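/*
 * Illustrative sketch, not part of dev.c: drivers that multiplex several
 * hardware channels onto one NAPI context embed a dummy netdev purely as
 * a NAPI anchor. The foo_* names are hypothetical.
 */
struct foo_adapter {
	struct net_device napi_dev;	/* never registered */
	struct napi_struct napi;
};

static void foo_napi_setup(struct foo_adapter *fa,
			   int (*poll)(struct napi_struct *, int))
{
	init_dummy_netdev(&fa->napi_dev);
	netif_napi_add(&fa->napi_dev, &fa->napi, poll, NAPI_POLL_WEIGHT);
}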
7725
7726
7727/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007728 * register_netdev - register a network device
7729 * @dev: device to register
7730 *
7731 * Take a completed network device structure and add it to the kernel
7732 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7733 * chain. 0 is returned on success. A negative errno code is returned
7734 * on a failure to set up the device, or if the name is a duplicate.
7735 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07007736 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07007737 * and expands the device name if you passed a format string to
7738 * alloc_netdev.
7739 */
7740int register_netdev(struct net_device *dev)
7741{
7742 int err;
7743
7744 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007745 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007746 rtnl_unlock();
7747 return err;
7748}
7749EXPORT_SYMBOL(register_netdev);
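/*
 * Illustrative sketch, not part of dev.c: the usual probe-time pairing of
 * alloc_etherdev() with register_netdev(), and free_netdev() on the error
 * path before any registration succeeded. foo_priv and foo_probe_netdev
 * are hypothetical.
 */
struct foo_priv {
	int dummy;
};

static struct net_device *foo_probe_netdev(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));

	if (!dev)
		return NULL;
	if (register_netdev(dev)) {	/* takes and releases rtnl itself */
		free_netdev(dev);	/* never registered: plain free */
		return NULL;
	}
	return dev;
}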
7750
Eric Dumazet29b44332010-10-11 10:22:12 +00007751int netdev_refcnt_read(const struct net_device *dev)
7752{
7753 int i, refcnt = 0;
7754
7755 for_each_possible_cpu(i)
7756 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
7757 return refcnt;
7758}
7759EXPORT_SYMBOL(netdev_refcnt_read);
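/*
 * Illustrative sketch, not part of dev.c: the per-cpu sum is mainly a
 * diagnostic, e.g. to log why netdev_wait_allrefs() below is stalling.
 * The foo_ name is hypothetical.
 */
static void foo_log_refcnt(const struct net_device *dev)
{
	pr_info("%s: current refcnt = %d\n", dev->name,
		netdev_refcnt_read(dev));
}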
7760
Ben Hutchings2c530402012-07-10 10:55:09 +00007761/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007762 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00007763 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07007764 *
7765 * This is called when unregistering network devices.
7766 *
7767 * Any protocol or device that holds a reference should register
7768 * for netdevice notification, and cleanup and put back the
7769 * reference if they receive an UNREGISTER event.
7770 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007771 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007772 */
7773static void netdev_wait_allrefs(struct net_device *dev)
7774{
7775 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00007776 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007777
Eric Dumazete014deb2009-11-17 05:59:21 +00007778 linkwatch_forget_dev(dev);
7779
Linus Torvalds1da177e2005-04-16 15:20:36 -07007780 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00007781 refcnt = netdev_refcnt_read(dev);
7782
7783 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007784 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08007785 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007786
7787 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007788 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007789
Eric Dumazet748e2d92012-08-22 21:50:59 +00007790 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007791 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00007792 rtnl_lock();
7793
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007794 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007795 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
7796 &dev->state)) {
7797 /* We must not have linkwatch events
7798 * pending on unregister. If this
7799 * happens, we simply run the queue
7800 * unscheduled, resulting in a noop
7801 * for this device.
7802 */
7803 linkwatch_run_queue();
7804 }
7805
Stephen Hemminger6756ae42006-03-20 22:23:58 -08007806 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007807
7808 rebroadcast_time = jiffies;
7809 }
7810
7811 msleep(250);
7812
Eric Dumazet29b44332010-10-11 10:22:12 +00007813 refcnt = netdev_refcnt_read(dev);
7814
Linus Torvalds1da177e2005-04-16 15:20:36 -07007815 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007816 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
7817 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007818 warning_time = jiffies;
7819 }
7820 }
7821}
7822
7823/* The sequence is:
7824 *
7825 * rtnl_lock();
7826 * ...
7827 * register_netdevice(x1);
7828 * register_netdevice(x2);
7829 * ...
7830 * unregister_netdevice(y1);
7831 * unregister_netdevice(y2);
7832 * ...
7833 * rtnl_unlock();
7834 * free_netdev(y1);
7835 * free_netdev(y2);
7836 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07007837 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07007838 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007839 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07007840 * without deadlocking with linkwatch via keventd.
7841 * 2) Since we run with the RTNL semaphore not held, we can sleep
7842 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07007843 *
7844 * We must not return until all unregister events added during
7845 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007846 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007847void netdev_run_todo(void)
7848{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007849 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007850
Linus Torvalds1da177e2005-04-16 15:20:36 -07007851 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007852 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07007853
7854 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007855
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007856
7857 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00007858 if (!list_empty(&list))
7859 rcu_barrier();
7860
Linus Torvalds1da177e2005-04-16 15:20:36 -07007861 while (!list_empty(&list)) {
7862 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00007863 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007864 list_del(&dev->todo_list);
7865
Eric Dumazet748e2d92012-08-22 21:50:59 +00007866 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007867 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00007868 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007869
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007870 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007871 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007872 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007873 dump_stack();
7874 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007875 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007876
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007877 dev->reg_state = NETREG_UNREGISTERED;
7878
7879 netdev_wait_allrefs(dev);
7880
7881 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00007882 BUG_ON(netdev_refcnt_read(dev));
Salam Noureddine7866a622015-01-27 11:35:48 -08007883 BUG_ON(!list_empty(&dev->ptype_all));
7884 BUG_ON(!list_empty(&dev->ptype_specific));
Eric Dumazet33d480c2011-08-11 19:30:52 +00007885 WARN_ON(rcu_access_pointer(dev->ip_ptr));
7886 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07007887 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007888
David S. Millercf124db2017-05-08 12:52:56 -04007889 if (dev->priv_destructor)
7890 dev->priv_destructor(dev);
7891 if (dev->needs_free_netdev)
7892 free_netdev(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007893
Eric W. Biederman50624c92013-09-23 21:19:49 -07007894 /* Report a network device has been unregistered */
7895 rtnl_lock();
7896 dev_net(dev)->dev_unreg_count--;
7897 __rtnl_unlock();
7898 wake_up(&netdev_unregistering_wq);
7899
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007900 /* Free network device */
7901 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007902 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007903}
7904
Jarod Wilson92566452016-02-01 18:51:04 -05007905/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
7906 * all the same fields in the same order as net_device_stats, with only
7907 * the type differing, but rtnl_link_stats64 may have additional fields
7908 * at the end for newer counters.
Ben Hutchings3cfde792010-07-09 09:11:52 +00007909 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007910void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7911 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00007912{
7913#if BITS_PER_LONG == 64
Jarod Wilson92566452016-02-01 18:51:04 -05007914 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
Alban Browaeys9af99592017-07-03 03:20:13 +02007915 memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
Jarod Wilson92566452016-02-01 18:51:04 -05007916 /* zero out counters that only exist in rtnl_link_stats64 */
7917 memset((char *)stats64 + sizeof(*netdev_stats), 0,
7918 sizeof(*stats64) - sizeof(*netdev_stats));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007919#else
Jarod Wilson92566452016-02-01 18:51:04 -05007920 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
Ben Hutchings3cfde792010-07-09 09:11:52 +00007921 const unsigned long *src = (const unsigned long *)netdev_stats;
7922 u64 *dst = (u64 *)stats64;
7923
Jarod Wilson92566452016-02-01 18:51:04 -05007924 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007925 for (i = 0; i < n; i++)
7926 dst[i] = src[i];
Jarod Wilson92566452016-02-01 18:51:04 -05007927 /* zero out counters that only exist in rtnl_link_stats64 */
7928 memset((char *)stats64 + n * sizeof(u64), 0,
7929 sizeof(*stats64) - n * sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007930#endif
7931}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007932EXPORT_SYMBOL(netdev_stats_to_stats64);
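/*
 * Illustrative sketch, not part of dev.c: a driver's ndo_get_stats64()
 * can seed the 64-bit structure from the legacy dev->stats block before
 * layering in its own counters. The foo_ name is hypothetical.
 */
static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	netdev_stats_to_stats64(stats, &dev->stats);
	/* device-private 64-bit counters would be folded in here */
}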
Ben Hutchings3cfde792010-07-09 09:11:52 +00007933
Eric Dumazetd83345a2009-11-16 03:36:51 +00007934/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007935 * dev_get_stats - get network device statistics
7936 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07007937 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007938 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00007939 * Get network statistics from device. Return @storage.
7940 * The device driver may provide its own method by setting
7941 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
7942 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007943 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00007944struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7945 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00007946{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007947 const struct net_device_ops *ops = dev->netdev_ops;
7948
Eric Dumazet28172732010-07-07 14:58:56 -07007949 if (ops->ndo_get_stats64) {
7950 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007951 ops->ndo_get_stats64(dev, storage);
7952 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00007953 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007954 } else {
7955 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07007956 }
Eric Dumazet6f64ec72017-06-27 07:02:20 -07007957 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
7958 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
7959 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
Eric Dumazet28172732010-07-07 14:58:56 -07007960 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07007961}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007962EXPORT_SYMBOL(dev_get_stats);
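/*
 * Illustrative sketch, not part of dev.c: callers pass a buffer and read
 * the filled-in struct; the returned pointer is simply @storage. The
 * foo_ name is hypothetical.
 */
static u64 foo_total_rx_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	return stats.rx_packets;
}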
Rusty Russellc45d2862007-03-28 14:29:08 -07007963
Eric Dumazet24824a02010-10-02 06:11:55 +00007964struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07007965{
Eric Dumazet24824a02010-10-02 06:11:55 +00007966 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07007967
Eric Dumazet24824a02010-10-02 06:11:55 +00007968#ifdef CONFIG_NET_CLS_ACT
7969 if (queue)
7970 return queue;
7971 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7972 if (!queue)
7973 return NULL;
7974 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08007975 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
Eric Dumazet24824a02010-10-02 06:11:55 +00007976 queue->qdisc_sleeping = &noop_qdisc;
7977 rcu_assign_pointer(dev->ingress_queue, queue);
7978#endif
7979 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07007980}
7981
Eric Dumazet2c60db02012-09-16 09:17:26 +00007982static const struct ethtool_ops default_ethtool_ops;
7983
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00007984void netdev_set_default_ethtool_ops(struct net_device *dev,
7985 const struct ethtool_ops *ops)
7986{
7987 if (dev->ethtool_ops == &default_ethtool_ops)
7988 dev->ethtool_ops = ops;
7989}
7990EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
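/*
 * Illustrative sketch, not part of dev.c: a mid-layer library (usbnet is
 * an in-tree caller) installs fallback ethtool_ops this way, so a
 * minidriver's own ops, if already set, win. foo_* names are hypothetical.
 */
static const struct ethtool_ops foo_fallback_ethtool_ops = {
	.get_link = ethtool_op_get_link,
};

static void foo_init_ethtool(struct net_device *dev)
{
	netdev_set_default_ethtool_ops(dev, &foo_fallback_ethtool_ops);
}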
7991
Eric Dumazet74d332c2013-10-30 13:10:44 -07007992void netdev_freemem(struct net_device *dev)
7993{
7994 char *addr = (char *)dev - dev->padded;
7995
WANG Cong4cb28972014-06-02 15:55:22 -07007996 kvfree(addr);
Eric Dumazet74d332c2013-10-30 13:10:44 -07007997}
7998
Linus Torvalds1da177e2005-04-16 15:20:36 -07007999/**
tcharding722c9a02017-02-09 17:56:04 +11008000 * alloc_netdev_mqs - allocate network device
8001 * @sizeof_priv: size of private data to allocate space for
8002 * @name: device name format string
8003 * @name_assign_type: origin of device name
8004 * @setup: callback to initialize device
8005 * @txqs: the number of TX subqueues to allocate
8006 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07008007 *
tcharding722c9a02017-02-09 17:56:04 +11008008 * Allocates a struct net_device with private data area for driver use
8009 * and performs basic initialization. Also allocates subqueue structs
8010 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008011 */
Tom Herbert36909ea2011-01-09 19:36:31 +00008012struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02008013 unsigned char name_assign_type,
Tom Herbert36909ea2011-01-09 19:36:31 +00008014 void (*setup)(struct net_device *),
8015 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008016{
Linus Torvalds1da177e2005-04-16 15:20:36 -07008017 struct net_device *dev;
Alexey Dobriyan52a59bd2017-09-21 23:33:29 +03008018 unsigned int alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00008019 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008020
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07008021 BUG_ON(strlen(name) >= sizeof(dev->name));
8022
Tom Herbert36909ea2011-01-09 19:36:31 +00008023 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008024 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00008025 return NULL;
8026 }
8027
Michael Daltona953be52014-01-16 22:23:28 -08008028#ifdef CONFIG_SYSFS
Tom Herbert36909ea2011-01-09 19:36:31 +00008029 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008030 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00008031 return NULL;
8032 }
8033#endif
8034
David S. Millerfd2ea0a2008-07-17 01:56:23 -07008035 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07008036 if (sizeof_priv) {
8037 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00008038 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07008039 alloc_size += sizeof_priv;
8040 }
8041 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00008042 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008043
Michal Hockodcda9b02017-07-12 14:36:45 -07008044 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
Joe Perches62b59422013-02-04 16:48:16 +00008045 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008046 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008047
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00008048 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008049 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00008050
Eric Dumazet29b44332010-10-11 10:22:12 +00008051 dev->pcpu_refcnt = alloc_percpu(int);
8052 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07008053 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00008054
Linus Torvalds1da177e2005-04-16 15:20:36 -07008055 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00008056 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008057
Jiri Pirko22bedad32010-04-01 21:22:57 +00008058 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00008059 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00008060
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09008061 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008062
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07008063 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00008064 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008065
Herbert Xud565b0a2008-12-15 23:38:52 -08008066 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00008067 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07008068 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00008069 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02008070 INIT_LIST_HEAD(&dev->adj_list.upper);
8071 INIT_LIST_HEAD(&dev->adj_list.lower);
Salam Noureddine7866a622015-01-27 11:35:48 -08008072 INIT_LIST_HEAD(&dev->ptype_all);
8073 INIT_LIST_HEAD(&dev->ptype_specific);
Jiri Kosina59cc1f62016-08-10 11:05:15 +02008074#ifdef CONFIG_NET_SCHED
8075 hash_init(dev->qdisc_hash);
8076#endif
Eric Dumazet02875872014-10-05 18:38:35 -07008077 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008078 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08008079
Phil Suttera8131042016-02-17 15:37:43 +01008080 if (!dev->tx_queue_len) {
Phil Sutterf84bb1e2015-08-27 21:21:36 +02008081 dev->priv_flags |= IFF_NO_QUEUE;
Jesper Dangaard Brouer11597082016-11-03 14:56:06 +01008082 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
Phil Suttera8131042016-02-17 15:37:43 +01008083 }
Phil Sutter906470c2015-08-18 10:30:48 +02008084
David S. Miller8d3bdbd2011-02-08 15:02:50 -08008085 dev->num_tx_queues = txqs;
8086 dev->real_num_tx_queues = txqs;
8087 if (netif_alloc_netdev_queues(dev))
8088 goto free_all;
8089
Michael Daltona953be52014-01-16 22:23:28 -08008090#ifdef CONFIG_SYSFS
David S. Miller8d3bdbd2011-02-08 15:02:50 -08008091 dev->num_rx_queues = rxqs;
8092 dev->real_num_rx_queues = rxqs;
8093 if (netif_alloc_rx_queues(dev))
8094 goto free_all;
8095#endif
8096
Linus Torvalds1da177e2005-04-16 15:20:36 -07008097 strcpy(dev->name, name);
Tom Gundersenc835a672014-07-14 16:37:24 +02008098 dev->name_assign_type = name_assign_type;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00008099 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00008100 if (!dev->ethtool_ops)
8101 dev->ethtool_ops = &default_ethtool_ops;
Pablo Neirae687ad62015-05-13 18:19:38 +02008102
8103 nf_hook_ingress_init(dev);
8104
Linus Torvalds1da177e2005-04-16 15:20:36 -07008105 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00008106
David S. Miller8d3bdbd2011-02-08 15:02:50 -08008107free_all:
8108 free_netdev(dev);
8109 return NULL;
8110
Eric Dumazet29b44332010-10-11 10:22:12 +00008111free_pcpu:
8112 free_percpu(dev->pcpu_refcnt);
Eric Dumazet74d332c2013-10-30 13:10:44 -07008113free_dev:
8114 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00008115 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008116}
Tom Herbert36909ea2011-01-09 19:36:31 +00008117EXPORT_SYMBOL(alloc_netdev_mqs);
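/*
 * Illustrative sketch, not part of dev.c: Ethernet's allocator is a thin
 * wrapper over alloc_netdev_mqs() along these lines (compare
 * alloc_etherdev_mqs() in net/ethernet/eth.c); the "eth%d" format string
 * is expanded at registration time.
 */
static struct net_device *foo_alloc_etherdev(int sizeof_priv,
					     unsigned int txqs,
					     unsigned int rxqs)
{
	return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_UNKNOWN,
				ether_setup, txqs, rxqs);
}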
Linus Torvalds1da177e2005-04-16 15:20:36 -07008118
8119/**
tcharding722c9a02017-02-09 17:56:04 +11008120 * free_netdev - free network device
8121 * @dev: device
Linus Torvalds1da177e2005-04-16 15:20:36 -07008122 *
tcharding722c9a02017-02-09 17:56:04 +11008123 * This function does the last stage of destroying an allocated device
8124 * interface. The reference to the device object is released. If this
8125 * is the last reference then it will be freed. Must be called in process
8126 * context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008127 */
8128void free_netdev(struct net_device *dev)
8129{
Herbert Xud565b0a2008-12-15 23:38:52 -08008130 struct napi_struct *p, *n;
David S. Millerb5cdae32017-04-18 15:36:58 -04008131 struct bpf_prog *prog;
Herbert Xud565b0a2008-12-15 23:38:52 -08008132
Eric Dumazet93d05d42015-11-18 06:31:03 -08008133 might_sleep();
Eric Dumazet60877a32013-06-20 01:15:51 -07008134 netif_free_tx_queues(dev);
Michael Daltona953be52014-01-16 22:23:28 -08008135#ifdef CONFIG_SYSFS
Pankaj Gupta10595902015-01-12 11:41:28 +05308136 kvfree(dev->_rx);
Tom Herbertfe822242010-11-09 10:47:38 +00008137#endif
David S. Millere8a04642008-07-17 00:34:19 -07008138
Eric Dumazet33d480c2011-08-11 19:30:52 +00008139 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00008140
Jiri Pirkof001fde2009-05-05 02:48:28 +00008141 /* Flush device addresses */
8142 dev_addr_flush(dev);
8143
Herbert Xud565b0a2008-12-15 23:38:52 -08008144 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
8145 netif_napi_del(p);
8146
Eric Dumazet29b44332010-10-11 10:22:12 +00008147 free_percpu(dev->pcpu_refcnt);
8148 dev->pcpu_refcnt = NULL;
8149
David S. Millerb5cdae32017-04-18 15:36:58 -04008150 prog = rcu_dereference_protected(dev->xdp_prog, 1);
8151 if (prog) {
8152 bpf_prog_put(prog);
8153 static_key_slow_dec(&generic_xdp_needed);
8154 }
8155
Stephen Hemminger3041a062006-05-26 13:25:24 -07008156 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008157 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07008158 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008159 return;
8160 }
8161
8162 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
8163 dev->reg_state = NETREG_RELEASED;
8164
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07008165 /* will free via device release */
8166 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008167}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008168EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09008169
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008170/**
8171 * synchronize_net - Synchronize with packet receive processing
8172 *
8173 * Wait for packets currently being received to be done.
8174 * Does not block later packets from starting.
8175 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09008176void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008177{
8178 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00008179 if (rtnl_is_locked())
8180 synchronize_rcu_expedited();
8181 else
8182 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008183}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008184EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008185
8186/**
Eric Dumazet44a08732009-10-27 07:03:04 +00008187 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07008188 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00008189 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08008190 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008191 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08008192 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00008193 * If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008194 *
8195 * Callers must hold the rtnl semaphore. You may want
8196 * unregister_netdev() instead of this.
8197 */
8198
Eric Dumazet44a08732009-10-27 07:03:04 +00008199void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008200{
Herbert Xua6620712007-12-12 19:21:56 -08008201 ASSERT_RTNL();
8202
Eric Dumazet44a08732009-10-27 07:03:04 +00008203 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00008204 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00008205 } else {
8206 rollback_registered(dev);
8207 /* Finish processing unregister after unlock */
8208 net_set_todo(dev);
8209 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008210}
Eric Dumazet44a08732009-10-27 07:03:04 +00008211EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008212
8213/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008214 * unregister_netdevice_many - unregister many devices
8215 * @head: list of devices
Eric Dumazet87757a92014-06-06 06:44:03 -07008216 *
8217 * Note: As most callers use a stack-allocated list_head,
8218 * we force a list_del() to make sure the stack won't be corrupted later.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008219 */
8220void unregister_netdevice_many(struct list_head *head)
8221{
8222 struct net_device *dev;
8223
8224 if (!list_empty(head)) {
8225 rollback_registered_many(head);
8226 list_for_each_entry(dev, head, unreg_list)
8227 net_set_todo(dev);
Eric Dumazet87757a92014-06-06 06:44:03 -07008228 list_del(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008229 }
8230}
Eric Dumazet63c80992009-10-27 07:06:49 +00008231EXPORT_SYMBOL(unregister_netdevice_many);
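/*
 * Illustrative sketch, not part of dev.c: batching many unregisters
 * through one stack list amortizes the expensive rollback/synchronize
 * steps, as default_device_exit_batch() below does. The foo_ name is
 * hypothetical.
 */
static void foo_unregister_all(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	ASSERT_RTNL();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);	/* also list_del()s the head */
}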
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008232
8233/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008234 * unregister_netdev - remove device from the kernel
8235 * @dev: device
8236 *
8237 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08008238 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008239 *
8240 * This is just a wrapper for unregister_netdevice that takes
8241 * the rtnl semaphore. In general you want to use this and not
8242 * unregister_netdevice.
8243 */
8244void unregister_netdev(struct net_device *dev)
8245{
8246 rtnl_lock();
8247 unregister_netdevice(dev);
8248 rtnl_unlock();
8249}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008250EXPORT_SYMBOL(unregister_netdev);
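/*
 * Illustrative sketch, not part of dev.c: a module exit path pairs
 * unregister_netdev() with free_netdev(); the wait for references to
 * drop happens inside the rtnl_unlock() -> netdev_run_todo() sequence.
 * The foo_ name is hypothetical.
 */
static void foo_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);	/* dev->reg_state is now NETREG_UNREGISTERED */
}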
8251
Eric W. Biedermance286d32007-09-12 13:53:49 +02008252/**
8253 * dev_change_net_namespace - move device to a different network namespace
8254 * @dev: device
8255 * @net: network namespace
8256 * @pat: If not NULL name pattern to try if the current device name
8257 * is already taken in the destination network namespace.
8258 *
8259 * This function shuts down a device interface and moves it
8260 * to a new network namespace. On success 0 is returned, on
8261 * a failure a negative errno code is returned.
8262 *
8263 * Callers must hold the rtnl semaphore.
8264 */
8265
8266int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
8267{
Eric W. Biedermance286d32007-09-12 13:53:49 +02008268 int err;
8269
8270 ASSERT_RTNL();
8271
8272 /* Don't allow namespace local devices to be moved. */
8273 err = -EINVAL;
8274 if (dev->features & NETIF_F_NETNS_LOCAL)
8275 goto out;
8276
8277 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02008278 if (dev->reg_state != NETREG_REGISTERED)
8279 goto out;
8280
8281 /* Get out if there is nothing to do */
8282 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09008283 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02008284 goto out;
8285
8286 /* Pick the destination device name, and ensure
8287 * we can use it in the destination network namespace.
8288 */
8289 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00008290 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008291 /* We get here if we can't use the current device name */
8292 if (!pat)
8293 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00008294 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02008295 goto out;
8296 }
8297
8298 /*
8299 * And now a mini version of register_netdevice/unregister_netdevice.
8300 */
8301
8302 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07008303 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008304
8305 /* And unlink it from device chain */
8306 err = -ENODEV;
8307 unlist_netdevice(dev);
8308
8309 synchronize_net();
8310
8311 /* Shutdown queueing discipline. */
8312 dev_shutdown(dev);
8313
8314 /* Notify protocols that we are about to destroy
tchardingeb13da12017-02-09 17:56:06 +11008315 * this device. They should clean all the things.
8316 *
8317 * Note that dev->reg_state stays at NETREG_REGISTERED.
8318 * This is wanted because this way 8021q and macvlan know
8319 * the device is just moving and can keep their slaves up.
8320 */
Eric W. Biedermance286d32007-09-12 13:53:49 +02008321 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00008322 rcu_barrier();
8323 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07008324 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008325
8326 /*
8327 * Flush the unicast and multicast chains
8328 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00008329 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00008330 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008331
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008332 /* Send a netdev-removed uevent to the old namespace */
8333 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04008334 netdev_adjacent_del_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008335
Eric W. Biedermance286d32007-09-12 13:53:49 +02008336 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09008337 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008338
Eric W. Biedermance286d32007-09-12 13:53:49 +02008339 /* If there is an ifindex conflict assign a new one */
Nicolas Dichtel7a66bbc2015-04-02 17:07:09 +02008340 if (__dev_get_by_index(net, dev->ifindex))
Eric W. Biedermance286d32007-09-12 13:53:49 +02008341 dev->ifindex = dev_new_index(net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008342
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008343 /* Send a netdev-add uevent to the new namespace */
8344 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04008345 netdev_adjacent_add_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008346
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008347 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07008348 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008349 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008350
8351 /* Add the device back in the hashes */
8352 list_netdevice(dev);
8353
8354 /* Notify protocols that a new device appeared. */
8355 call_netdevice_notifiers(NETDEV_REGISTER, dev);
8356
Eric W. Biedermand90a9092009-12-12 22:11:15 +00008357 /*
8358 * Prevent userspace races by waiting until the network
8359 * device is fully setup before sending notifications.
8360 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07008361 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00008362
Eric W. Biedermance286d32007-09-12 13:53:49 +02008363 synchronize_net();
8364 err = 0;
8365out:
8366 return err;
8367}
Johannes Berg463d0182009-07-14 00:33:35 +02008368EXPORT_SYMBOL_GPL(dev_change_net_namespace);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008369
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008370static int dev_cpu_dead(unsigned int oldcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008371{
8372 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008373 struct sk_buff *skb;
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008374 unsigned int cpu;
Ashwanth Goli97d8b6e2017-06-13 16:54:55 +05308375 struct softnet_data *sd, *oldsd, *remsd = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008376
Linus Torvalds1da177e2005-04-16 15:20:36 -07008377 local_irq_disable();
8378 cpu = smp_processor_id();
8379 sd = &per_cpu(softnet_data, cpu);
8380 oldsd = &per_cpu(softnet_data, oldcpu);
8381
8382 /* Find end of our completion_queue. */
8383 list_skb = &sd->completion_queue;
8384 while (*list_skb)
8385 list_skb = &(*list_skb)->next;
8386 /* Append completion queue from offline CPU. */
8387 *list_skb = oldsd->completion_queue;
8388 oldsd->completion_queue = NULL;
8389
Linus Torvalds1da177e2005-04-16 15:20:36 -07008390 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00008391 if (oldsd->output_queue) {
8392 *sd->output_queue_tailp = oldsd->output_queue;
8393 sd->output_queue_tailp = oldsd->output_queue_tailp;
8394 oldsd->output_queue = NULL;
8395 oldsd->output_queue_tailp = &oldsd->output_queue;
8396 }
Eric Dumazetac64da02015-01-15 17:04:22 -08008397 /* Append NAPI poll list from offline CPU, with one exception:
8398 * process_backlog() must be called by cpu owning percpu backlog.
8399 * We properly handle process_queue & input_pkt_queue later.
8400 */
8401 while (!list_empty(&oldsd->poll_list)) {
8402 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
8403 struct napi_struct,
8404 poll_list);
8405
8406 list_del_init(&napi->poll_list);
8407 if (napi->poll == process_backlog)
8408 napi->state = 0;
8409 else
8410 ____napi_schedule(sd, napi);
Heiko Carstens264524d2011-06-06 20:50:03 +00008411 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008412
8413 raise_softirq_irqoff(NET_TX_SOFTIRQ);
8414 local_irq_enable();
8415
ashwanth@codeaurora.org773fc8f2017-06-09 14:24:58 +05308416#ifdef CONFIG_RPS
8417 remsd = oldsd->rps_ipi_list;
8418 oldsd->rps_ipi_list = NULL;
8419#endif
8420 /* send out pending IPI's on offline CPU */
8421 net_rps_send_ipi(remsd);
8422
Linus Torvalds1da177e2005-04-16 15:20:36 -07008423 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00008424 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08008425 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00008426 input_queue_head_incr(oldsd);
8427 }
Eric Dumazetac64da02015-01-15 17:04:22 -08008428 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08008429 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00008430 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07008431 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008432
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008433 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008434}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008435
Herbert Xu7f353bf2007-08-10 15:47:58 -07008436/**
Herbert Xub63365a2008-10-23 01:11:29 -07008437 * netdev_increment_features - increment feature set by one
8438 * @all: current feature set
8439 * @one: new feature set
8440 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07008441 *
8442 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07008443 * @one to the master device with current feature set @all. Will not
8444 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07008445 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00008446netdev_features_t netdev_increment_features(netdev_features_t all,
8447 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07008448{
Tom Herbertc8cd0982015-12-14 11:19:44 -08008449 if (mask & NETIF_F_HW_CSUM)
Tom Herberta1882222015-12-14 11:19:43 -08008450 mask |= NETIF_F_CSUM_MASK;
Michał Mirosław1742f182011-04-22 06:31:16 +00008451 mask |= NETIF_F_VLAN_CHALLENGED;
8452
Tom Herberta1882222015-12-14 11:19:43 -08008453 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
Michał Mirosław1742f182011-04-22 06:31:16 +00008454 all &= one | ~NETIF_F_ALL_FOR_ALL;
8455
Michał Mirosław1742f182011-04-22 06:31:16 +00008456 /* If one device supports hw checksumming, set for all. */
Tom Herbertc8cd0982015-12-14 11:19:44 -08008457 if (all & NETIF_F_HW_CSUM)
8458 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07008459
8460 return all;
8461}
Herbert Xub63365a2008-10-23 01:11:29 -07008462EXPORT_SYMBOL(netdev_increment_features);
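/*
 * Illustrative sketch, not part of dev.c: aggregating drivers such as
 * bonding and team fold every slave's features into the master with this
 * helper; the seed and mask below are illustrative, not the exact values
 * those drivers use. The foo_ name is hypothetical.
 */
static netdev_features_t foo_master_features(struct net_device *slaves[],
					     int n, netdev_features_t mask)
{
	netdev_features_t all = mask;
	int i;

	for (i = 0; i < n; i++)
		all = netdev_increment_features(all, slaves[i]->features,
						mask);
	return all;
}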
Herbert Xu7f353bf2007-08-10 15:47:58 -07008463
Baruch Siach430f03c2013-06-02 20:43:55 +00008464static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008465{
8466 int i;
8467 struct hlist_head *hash;
8468
8469 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
8470 if (hash != NULL)
8471 for (i = 0; i < NETDEV_HASHENTRIES; i++)
8472 INIT_HLIST_HEAD(&hash[i]);
8473
8474 return hash;
8475}
8476
Eric W. Biederman881d9662007-09-17 11:56:21 -07008477/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07008478static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07008479{
Rustad, Mark D734b6542012-07-18 09:06:07 +00008480 if (net != &init_net)
8481 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07008482
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008483 net->dev_name_head = netdev_create_hash();
8484 if (net->dev_name_head == NULL)
8485 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008486
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008487 net->dev_index_head = netdev_create_hash();
8488 if (net->dev_index_head == NULL)
8489 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008490
8491 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008492
8493err_idx:
8494 kfree(net->dev_name_head);
8495err_name:
8496 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008497}
8498
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008499/**
8500 * netdev_drivername - network driver for the device
8501 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008502 *
8503 * Determine network driver for device.
8504 */
David S. Miller3019de12011-06-06 16:41:33 -07008505const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07008506{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07008507 const struct device_driver *driver;
8508 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07008509 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07008510
8511 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008512 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07008513 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008514
8515 driver = parent->driver;
8516 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07008517 return driver->name;
8518 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008519}
8520
Joe Perches6ea754e2014-09-22 11:10:50 -07008521static void __netdev_printk(const char *level, const struct net_device *dev,
8522 struct va_format *vaf)
Joe Perches256df2f2010-06-27 01:02:35 +00008523{
Joe Perchesb004ff42012-09-12 20:12:19 -07008524 if (dev && dev->dev.parent) {
Joe Perches6ea754e2014-09-22 11:10:50 -07008525 dev_printk_emit(level[1] - '0',
8526 dev->dev.parent,
8527 "%s %s %s%s: %pV",
8528 dev_driver_string(dev->dev.parent),
8529 dev_name(dev->dev.parent),
8530 netdev_name(dev), netdev_reg_state(dev),
8531 vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008532 } else if (dev) {
Joe Perches6ea754e2014-09-22 11:10:50 -07008533 printk("%s%s%s: %pV",
8534 level, netdev_name(dev), netdev_reg_state(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008535 } else {
Joe Perches6ea754e2014-09-22 11:10:50 -07008536 printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008537 }
Joe Perches256df2f2010-06-27 01:02:35 +00008538}
8539
Joe Perches6ea754e2014-09-22 11:10:50 -07008540void netdev_printk(const char *level, const struct net_device *dev,
8541 const char *format, ...)
Joe Perches256df2f2010-06-27 01:02:35 +00008542{
8543 struct va_format vaf;
8544 va_list args;
Joe Perches256df2f2010-06-27 01:02:35 +00008545
8546 va_start(args, format);
8547
8548 vaf.fmt = format;
8549 vaf.va = &args;
8550
Joe Perches6ea754e2014-09-22 11:10:50 -07008551 __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008552
Joe Perches256df2f2010-06-27 01:02:35 +00008553 va_end(args);
Joe Perches256df2f2010-06-27 01:02:35 +00008554}
8555EXPORT_SYMBOL(netdev_printk);
8556
8557#define define_netdev_printk_level(func, level) \
Joe Perches6ea754e2014-09-22 11:10:50 -07008558void func(const struct net_device *dev, const char *fmt, ...) \
Joe Perches256df2f2010-06-27 01:02:35 +00008559{ \
Joe Perches256df2f2010-06-27 01:02:35 +00008560 struct va_format vaf; \
8561 va_list args; \
8562 \
8563 va_start(args, fmt); \
8564 \
8565 vaf.fmt = fmt; \
8566 vaf.va = &args; \
8567 \
Joe Perches6ea754e2014-09-22 11:10:50 -07008568 __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07008569 \
Joe Perches256df2f2010-06-27 01:02:35 +00008570 va_end(args); \
Joe Perches256df2f2010-06-27 01:02:35 +00008571} \
8572EXPORT_SYMBOL(func);
8573
8574define_netdev_printk_level(netdev_emerg, KERN_EMERG);
8575define_netdev_printk_level(netdev_alert, KERN_ALERT);
8576define_netdev_printk_level(netdev_crit, KERN_CRIT);
8577define_netdev_printk_level(netdev_err, KERN_ERR);
8578define_netdev_printk_level(netdev_warn, KERN_WARNING);
8579define_netdev_printk_level(netdev_notice, KERN_NOTICE);
8580define_netdev_printk_level(netdev_info, KERN_INFO);
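/*
 * Illustrative sketch, not part of dev.c: the helpers generated above
 * prefix messages with the driver, bus and interface names, which is why
 * they are preferred over bare printk() in netdev code. The foo_ name is
 * hypothetical.
 */
static void foo_report_link(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link up\n");
	else
		netdev_warn(dev, "link down\n");
}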
8581
Pavel Emelyanov46650792007-10-08 20:38:39 -07008582static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07008583{
8584 kfree(net->dev_name_head);
8585 kfree(net->dev_index_head);
8586}
8587
Denis V. Lunev022cbae2007-11-13 03:23:50 -08008588static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07008589 .init = netdev_init,
8590 .exit = netdev_exit,
8591};
8592
Pavel Emelyanov46650792007-10-08 20:38:39 -07008593static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02008594{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008595 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02008596 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008597 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02008598 * initial network namespace
8599 */
8600 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008601 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008602 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008603 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02008604
8605 /* Ignore unmoveable devices (i.e. loopback) */
8606 if (dev->features & NETIF_F_NETNS_LOCAL)
8607 continue;
8608
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008609 /* Leave virtual devices for the generic cleanup */
8610 if (dev->rtnl_link_ops)
8611 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08008612
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008613 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008614 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
8615 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008616 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008617 pr_emerg("%s: failed to move %s to init_net: %d\n",
8618 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008619 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02008620 }
8621 }
8622 rtnl_unlock();
8623}
8624
Eric W. Biederman50624c92013-09-23 21:19:49 -07008625static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
8626{
8627 /* Return with the rtnl_lock held when there are no network
8628 * devices unregistering in any network namespace in net_list.
8629 */
8630 struct net *net;
8631 bool unregistering;
Peter Zijlstraff960a72014-10-29 17:04:56 +01008632 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008633
Peter Zijlstraff960a72014-10-29 17:04:56 +01008634 add_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008635 for (;;) {
Eric W. Biederman50624c92013-09-23 21:19:49 -07008636 unregistering = false;
8637 rtnl_lock();
8638 list_for_each_entry(net, net_list, exit_list) {
8639 if (net->dev_unreg_count > 0) {
8640 unregistering = true;
8641 break;
8642 }
8643 }
8644 if (!unregistering)
8645 break;
8646 __rtnl_unlock();
Peter Zijlstraff960a72014-10-29 17:04:56 +01008647
8648 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008649 }
Peter Zijlstraff960a72014-10-29 17:04:56 +01008650 remove_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008651}
8652
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008653static void __net_exit default_device_exit_batch(struct list_head *net_list)
8654{
8655 /* At exit all network devices most be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04008656 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008657 * Do this across as many network namespaces as possible to
8658 * improve batching efficiency.
8659 */
8660 struct net_device *dev;
8661 struct net *net;
8662 LIST_HEAD(dev_kill_list);
8663
Eric W. Biederman50624c92013-09-23 21:19:49 -07008664 /* To prevent network device cleanup code from dereferencing
8665 * loopback devices or network devices that have been freed,
8666 * wait here for all pending unregistrations to complete
8667 * before unregistering the loopback device and allowing the
8668 * network namespace to be freed.
8669 *
8670 * The netdev todo list containing all network devices
8671 * unregistrations that happen in default_device_exit_batch
8672 * will run in the rtnl_unlock() at the end of
8673 * default_device_exit_batch.
8674 */
8675 rtnl_lock_unregistering(net_list);
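	/* Queue every remaining device for removal.  Virtual devices go
	 * through their rtnl_link_ops->dellink() so link-type-specific
	 * teardown runs; everything else is queued directly.
	 */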
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

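	/* ptype_all and the PTYPE_HASH_SIZE buckets of ptype_base hold
	 * the packet_type handlers registered with dev_add_pack().
	 */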
	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

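	/* offload_base chains the protocol GSO/GRO offloads registered
	 * with dev_add_offload().
	 */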
	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

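		/* Each CPU gets a work item that runs flush_backlog() to
		 * drop queued skbs whose device is unregistering
		 * (see flush_all_backlogs()).
		 */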
		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
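		/* csd lets a remote CPU raise NET_RX_SOFTIRQ here via an
		 * IPI when RPS steers packets to this CPU
		 * (see rps_trigger_softirq()).
		 */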
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

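		/* The backlog pseudo-NAPI instance drains packets queued
		 * by netif_rx() and by RPS from remote CPUs.
		 */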
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too.  Since we now dynamically allocate and free
	 * the loopback device, maintain this invariant by keeping the
	 * loopback device the first device on the list of network
	 * devices, ensuring it is the first device that appears and
	 * the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

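	/* When a CPU goes offline, dev_cpu_dead() migrates its queued
	 * packets and pending softnet work over to an online CPU.
	 */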
	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);