/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates. This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See register_netdevice() and unregister_netdevice() for example usages;
 * both must be called with the rtnl semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * The caller must respect an RCU grace period before freeing/reusing dev.
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
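
/*
 * Worked example (editor's illustration, not part of the original source):
 * an Ethernet device has dev->type == ARPHRD_ETHER, the second entry of
 * netdev_lock_type[], so netdev_lock_pos() returns 1 and the device's
 * _xmit_lock gets the lockdep class named "_xmit_ETHER". Any type not in
 * the table falls through to the last entry, "_xmit_NONE".
 */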

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/


/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets MUST BE last
 *	in the hash buckets, and protocol handler checks MUST start from
 *	the promiscuous ptype_all chain in net_bh. This holds today; do not
 *	change it.
 *	Explanation: if a packet-mangling protocol handler were first on the
 *	list, it could not sense that the packet is cloned and should be
 *	copied-on-write, so it would modify it and subsequent readers would
 *	get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
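
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the typical dev_add_pack() usage pattern. An ETH_P_ALL handler lands on
 * the ptype_all taps list via ptype_head(); a specific ethertype would be
 * hashed into ptype_base[]. my_tap and my_tap_rcv are invented names.
 */
#if 0
static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* The handler owns this reference to the skb and must release it. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type my_tap __read_mostly = {
	.type	= htons(ETH_P_ALL),	/* tap: sees every packet */
	.func	= my_tap_rcv,
};

/* dev_add_pack(&my_tap); ... later: dev_remove_pack(&my_tap); */
#endif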

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is
 *	removed from the kernel lists.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
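
/*
 * Illustrative sketch (editor's addition): offloads are kept sorted by
 * ascending ->priority, so a lower value is matched earlier. The names and
 * the priority value below are invented for the example.
 */
#if 0
static struct packet_offload my_offload __read_mostly = {
	.type	  = htons(ETH_P_IP),
	.priority = 10,		/* inserted before any entry with priority > 10 */
	.callbacks = {
		.gro_receive  = my_gro_receive,		/* invented callbacks */
		.gro_complete = my_gro_complete,
	},
};

/* dev_add_offload(&my_offload); ... dev_remove_offload(&my_offload); */
#endif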

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed
 *	&packet_offload is removed from the kernel lists.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds a new setup entry to the dev_boot_setup list. The function
 *	returns 0 on error and 1 on success. This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device. Any settings found are
 *	copied into the device, to be used later during probing.
 *	Returns 0 if no settings were found, 1 if they were.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of the device.
 *	Returns the configured base address, 1 if the device is already
 *	registered (to indicate it should not be probed), or 0 if no
 *	settings were found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If the device is already registered then return a base of 1
	 * to indicate not to probe for this interface.
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves the settings configured at boot time for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
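
/*
 * Example (editor's illustration): booting with the command-line option
 *
 *	netdev=9,0x300,0,0,eth1
 *
 * parses four integers (irq 9, I/O base 0x300, mem_start 0, mem_end 0) and
 * stores them under the remaining string "eth1"; netdev_boot_setup_check()
 * later copies them into the netdevice during probing. The concrete values
 * are invented for the example.
 */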

/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic, OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under the RTNL semaphore
 *	or @dev_base_lock. If the name is found, a pointer to the device
 *	is returned. If the name is not found, %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found, a pointer to the device is returned.
 *	If the name is not found, %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
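
/*
 * Usage sketch (editor's addition): the hold/put discipline the refcounted
 * lookup expects. The function and the "eth0" name are invented for the
 * example.
 */
#if 0
static void example_dev_get_by_name(void)
{
	struct net_device *dev = dev_get_by_name(&init_net, "eth0");

	if (dev) {
		/* ... use dev ... */
		dev_put(dev);	/* drop the reference dev_get_by_name() took */
	}
}
#endif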

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not had its reference
 *	counter increased so the caller must be careful about locking. The
 *	caller must hold either the RTNL semaphore or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not had its reference
 *	counter increased so the caller must be careful about locking. The
 *	caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
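
/*
 * Usage sketch (editor's addition): the RCU-side lookup returns a pointer
 * that is only guaranteed valid inside the read-side critical section.
 * example_lookup() is an invented name.
 */
#if 0
static void example_lookup(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		netdev_info(dev, "found ifindex %d\n", ifindex);
	rcu_read_unlock();	/* dev must not be dereferenced after this */
}
#endif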


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device returned has had a reference
 *	added and the pointer is safe until the user calls dev_put to
 *	indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not had its reference
 *	counter increased so the caller must be careful about locking. The
 *	caller must hold the RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns a pointer to the
 *	device, or NULL if it is not found.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
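
/*
 * Usage sketch (editor's addition): looking up an Ethernet device by MAC
 * address under RCU. The function name and address bytes are invented for
 * the example.
 */
#if 0
static void example_find_by_mac(void)
{
	static const char mac[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, mac);
	/* dev, if non-NULL, is valid only inside this RCU section */
	rcu_read_unlock();
}
#endif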

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns a pointer to
 *	the first matching device, or NULL if none is found. Must be called
 *	inside rtnl_lock(); the result's refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);
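
/*
 * Usage sketch (editor's addition): the mask selects which flag bits must
 * match @if_flags exactly. For example, to find a device that is UP but is
 * not a loopback device:
 */
#if 0
	struct net_device *dev;

	dev = __dev_get_by_flags(net, IFF_UP, IFF_UP | IFF_LOOPBACK);
	/* matches dev->flags with IFF_UP set and IFF_LOOPBACK clear */
#endif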

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work. We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
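
/*
 * Examples (editor's addition): "eth0" and "eth0.100" are accepted;
 * "" (empty), ".", "..", "a/b", "a:b", names containing whitespace, and
 * names of IFNAMSIZ (16) characters or longer are all rejected.
 */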

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user. There must be exactly one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
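
/*
 * Worked example (editor's addition): with "eth0" and "eth2" already
 * registered in the namespace, dev_alloc_name(dev, "eth%d") marks bits 0
 * and 2 in the free map, finds unit 1 as the first zero bit, stores "eth1"
 * in dev->name, and returns 1.
 */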
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001145
Cong Wang0ad646c2017-10-13 11:58:53 -07001146int dev_get_valid_name(struct net *net, struct net_device *dev,
1147 const char *name)
Gao feng828de4f2012-09-13 20:58:27 +00001148{
David S. Miller55a5ec92018-01-02 11:45:07 -05001149 BUG_ON(!net);
1150
1151 if (!dev_valid_name(name))
1152 return -EINVAL;
1153
1154 if (strchr(name, '%'))
1155 return dev_alloc_name_ns(net, dev, name);
1156 else if (__dev_get_by_name(net, name))
1157 return -EEXIST;
1158 else if (dev->name != name)
1159 strlcpy(dev->name, name, IFNAMSIZ);
1160
1161 return 0;
Octavian Purdilad9031022009-11-18 02:36:59 +00001162}
Cong Wang0ad646c2017-10-13 11:58:53 -07001163EXPORT_SYMBOL(dev_get_valid_name);

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
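
/*
 * Illustrative sketch (assumed usage, not from the original file): renames
 * must run under RTNL and the device must be down, so a caller typically
 * does:
 *
 *	rtnl_lock();
 *	if (!(dev->flags & IFF_UP))
 *		err = dev_change_name(dev, "lan%d");
 *	rtnl_unlock();
 *
 * A format string such as "lan%d" is resolved through dev_alloc_name_ns(),
 * exactly as in dev_get_valid_name() above.
 */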

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	rcu_swap_protected(dev->ifalias, new_alias,
			   mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
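
/*
 * Illustrative sketch (hypothetical values): setting and clearing an alias.
 *
 *	static const char desc[] = "uplink to core switch";
 *
 *	ret = dev_set_alias(dev, desc, strlen(desc));
 *
 * Note that on success ret equals the stored length, not zero; passing
 * len == 0 drops the alias.
 */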
1288
Florian Westphal6c557002017-10-02 23:50:05 +02001289/**
1290 * dev_get_alias - get ifalias of a device
1291 * @dev: device
Florian Westphal20e88322017-10-04 13:56:50 +02001292 * @name: buffer to store name of ifalias
Florian Westphal6c557002017-10-02 23:50:05 +02001293 * @len: size of buffer
1294 *
1295 * get ifalias for a device. Caller must make sure dev cannot go
1296 * away, e.g. rcu read lock or own a reference count to device.
1297 */
1298int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1299{
1300 const struct dev_ifalias *alias;
1301 int ret = 0;
1302
1303 rcu_read_lock();
1304 alias = rcu_dereference(dev->ifalias);
1305 if (alias)
1306 ret = snprintf(name, len, "%s", alias->ifalias);
1307 rcu_read_unlock();
1308
1309 return ret;
1310}
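
/*
 * Illustrative sketch (assumed caller, holding a reference via dev_hold()
 * or similar so dev cannot go away):
 *
 *	char buf[IFALIASZ];
 *
 *	if (dev_get_alias(dev, buf, sizeof(buf)) > 0)
 *		pr_info("%s alias: %s\n", dev->name, buf);
 */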

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
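
/*
 * Illustrative sketch (hypothetical caller): a virtual NIC frontend that
 * has just completed a live migration could announce its unchanged MAC on
 * the new host with:
 *
 *	netdev_notify_peers(dev);
 *
 * The function takes the rtnl lock itself, so it must not be called with
 * RTNL already held.
 */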

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this, there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
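
/*
 * Illustrative sketch (assumed usage): bringing a device up from kernel
 * code mirrors "ip link set dev eth0 up" and must run under RTNL:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 *
 * A second call while the device is already up simply returns 0.
 */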

static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list,
		 * it can even be on a different cpu. So just clear
		 * netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);
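
/*
 * Illustrative sketch (assumed usage): dev_close() handles one device;
 * when tearing down several at once it is cheaper to batch them so that
 * __dev_close_many() deactivates and synchronizes the whole set together:
 *
 *	LIST_HEAD(close_head);
 *
 *	list_add(&dev_a->close_list, &close_head);
 *	list_add(&dev_b->close_list, &close_head);
 *	dev_close_many(&close_head, true);
 */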


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device. Must be
 *	called under RTNL. This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);
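
/*
 * Illustrative sketch (assumed caller): forwarding setups disable LRO
 * before routing through or enslaving a device, under RTNL:
 *
 *	ASSERT_RTNL();
 *	dev_disable_lro(dev);
 *
 * Because of the netdev_for_each_lower_dev() recursion above, calling this
 * on a bond or bridge also disables LRO on every lower device beneath it.
 */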

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
 *	called under RTNL. This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val)						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
		N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
		N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
		N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
		N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
		N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
		N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
		N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
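
/*
 * Illustrative sketch (hypothetical debug aid): notifier callbacks can log
 * a readable event name instead of the raw enum value:
 *
 *	pr_debug("%s: event %s\n", dev->name, netdev_cmd_to_name(event));
 *
 * Commands not listed above fall through to "UNKNOWN_NETDEV_EVENT".
 */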

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed to the
 *	new notifier to allow the device to have a race-free view of the
 *	network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	down_read(&net_rwsem);
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}
	up_read(&net_rwsem);

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	up_read(&net_rwsem);
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
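
/*
 * Illustrative sketch (hypothetical module; the names my_event_cb and
 * my_nb are not from this file): a minimal subscriber to netdev events.
 *
 *	static int my_event_cb(struct notifier_block *nb,
 *			       unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_event_cb };
 *
 *	register_netdevice_notifier(&my_nb);
 *
 * Because REGISTER/UP events are replayed at registration time, the
 * callback also sees devices that already existed.
 */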

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are
 *	synthesized for all devices on the device list to the removed
 *	notifier to remove the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	down_read(&net_rwsem);
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
	up_read(&net_rwsem);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks. Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks. Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return call_netdevice_notifiers_info(val, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
Pablo Neira1cf519002015-05-13 18:19:37 +02001753#ifdef CONFIG_NET_INGRESS
Daniel Borkmann45771392015-04-10 23:07:54 +02001754static struct static_key ingress_needed __read_mostly;
1755
1756void net_inc_ingress_queue(void)
1757{
1758 static_key_slow_inc(&ingress_needed);
1759}
1760EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1761
1762void net_dec_ingress_queue(void)
1763{
1764 static_key_slow_dec(&ingress_needed);
1765}
1766EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1767#endif
1768
Daniel Borkmann1f211a12016-01-07 22:29:47 +01001769#ifdef CONFIG_NET_EGRESS
1770static struct static_key egress_needed __read_mostly;
1771
1772void net_inc_egress_queue(void)
1773{
1774 static_key_slow_inc(&egress_needed);
1775}
1776EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1777
1778void net_dec_egress_queue(void)
1779{
1780 static_key_slow_dec(&egress_needed);
1781}
1782EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1783#endif
1784
Ingo Molnarc5905af2012-02-24 08:31:31 +01001785static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001786#ifdef HAVE_JUMP_LABEL
Eric Dumazetb90e5792011-11-28 11:16:50 +00001787static atomic_t netstamp_needed_deferred;
Eric Dumazet13baa002017-03-01 14:28:39 -08001788static atomic_t netstamp_wanted;
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001789static void netstamp_clear(struct work_struct *work)
1790{
1791 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
Eric Dumazet13baa002017-03-01 14:28:39 -08001792 int wanted;
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001793
Eric Dumazet13baa002017-03-01 14:28:39 -08001794 wanted = atomic_add_return(deferred, &netstamp_wanted);
1795 if (wanted > 0)
1796 static_key_enable(&netstamp_needed);
1797 else
1798 static_key_disable(&netstamp_needed);
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001799}
1800static DECLARE_WORK(netstamp_work, netstamp_clear);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001801#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802
1803void net_enable_timestamp(void)
1804{
Eric Dumazet13baa002017-03-01 14:28:39 -08001805#ifdef HAVE_JUMP_LABEL
1806 int wanted;
1807
1808 while (1) {
1809 wanted = atomic_read(&netstamp_wanted);
1810 if (wanted <= 0)
1811 break;
1812 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
1813 return;
1814 }
1815 atomic_inc(&netstamp_needed_deferred);
1816 schedule_work(&netstamp_work);
1817#else
Ingo Molnarc5905af2012-02-24 08:31:31 +01001818 static_key_slow_inc(&netstamp_needed);
Eric Dumazet13baa002017-03-01 14:28:39 -08001819#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001821EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822
1823void net_disable_timestamp(void)
1824{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001825#ifdef HAVE_JUMP_LABEL
Eric Dumazet13baa002017-03-01 14:28:39 -08001826 int wanted;
1827
1828 while (1) {
1829 wanted = atomic_read(&netstamp_wanted);
1830 if (wanted <= 1)
1831 break;
1832 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
1833 return;
1834 }
1835 atomic_dec(&netstamp_needed_deferred);
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001836 schedule_work(&netstamp_work);
1837#else
Ingo Molnarc5905af2012-02-24 08:31:31 +01001838 static_key_slow_dec(&netstamp_needed);
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001839#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001841EXPORT_SYMBOL(net_disable_timestamp);
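
/*
 * Illustrative sketch (assumed usage): the pair above behaves as a
 * reference count, so users (e.g. sockets that request SO_TIMESTAMP)
 * bracket their lifetime with it:
 *
 *	net_enable_timestamp();		(timestamping user created)
 *	...
 *	net_disable_timestamp();	(timestamping user destroyed)
 *
 * Only while the count is positive does net_timestamp_set() below actually
 * stamp skbs; otherwise the static key keeps the fast path untouched.
 */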

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp)		\
			__net_timestamp(SKB);		\
	}						\

bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented beforehand
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
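
/*
 * Illustrative sketch (assumed caller, e.g. a veth-like driver): the usual
 * pattern from an ndo_start_xmit() implementation forwarding to a peer:
 *
 *	if (dev_forward_skb(peer_dev, skb) == NET_RX_SUCCESS) {
 *		... update tx stats ...
 *	} else {
 *		... count the drop; the skb has already been freed ...
 *	}
 */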

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, TC0
 * is invalid and nothing can be done, so priority mappings are
 * disabled. It is expected that drivers will fix this mapping if they
 * can before calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}

static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	int num_tc = dev->num_tc ? : 1;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			if (!remove_xps_queue(dev_maps, tci, j))
				break;
		}

		active |= i < 0;
	}

	return active;
}

static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu)
		active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
					       offset, count);

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = offset + (count - 1); count--; i--)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, cpu, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_cpu_and(cpu, cpu_online_mask, mask) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = cpu * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		/* copy maps belonging to foreign traffic classes */
		for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* We need to explicitly update tci as the previous loop
		 * could break out early if dev_maps is NULL.
		 */
		tci = cpu * num_tc + tc;

		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* copy maps belonging to foreign traffic classes */
		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		for (i = tc, tci = cpu * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->cpu_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
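
/*
 * Illustrative sketch (assumed driver code, CONFIG_XPS enabled): a common
 * default is to pin tx queue i to CPU i:
 *
 *	for (i = 0; i < dev->real_num_tx_queues; i++)
 *		netif_set_xps_queue(dev, cpumask_of(i), i);
 *
 * The mask is copied into the per-CPU xps maps above, so a temporary or
 * stack-local cpumask is fine.
 */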
void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);

int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);

int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
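
/*
 * Illustrative sketch (hypothetical 2-TC layout on an 8-queue device):
 * drivers set up the class-to-queue and prio-to-class mappings like so:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	(TC0: queues 0-3)
 *	netdev_set_tc_queue(dev, 1, 4, 4);	(TC1: queues 4-7)
 *	netdev_set_prio_tc_map(dev, 0, 1);	(prio 0 -> TC1)
 */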

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	bool disabling;
	int rc;

	disabling = txq < dev->real_num_tx_queues;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		dev->real_num_tx_queues = txq;

		if (disabling) {
			synchronize_net();
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	} else {
		dev->real_num_tx_queues = txq;
	}

	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
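
/*
 * Illustrative sketch (assumed driver context): after reconfiguring its
 * channel count a driver shrinks or grows the active tx queue set under
 * RTNL:
 *
 *	ASSERT_RTNL();
 *	err = netif_set_real_num_tx_queues(dev, new_txq);
 *
 * When shrinking, the synchronize_net()/qdisc_reset_all_tx_gt() path above
 * flushes skbs still mapped to the now-invalid queues.
 */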
Denis Vlasenko56079432006-03-29 15:57:29 -08002445
#ifdef CONFIG_SYSFS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return is_kdump_kernel() ?
		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
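
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a TX-completion handler that may run in hardirq context frees skbs
 * with dev_kfree_skb_any(), which routes them to the softirq completion
 * queue above when IRQs are off. foo_next_completed() is a made-up
 * helper standing in for the driver's completion-ring walk.
 *
 *	static irqreturn_t foo_tx_irq(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *		struct sk_buff *skb;
 *
 *		while ((skb = foo_next_completed(priv)))
 *			dev_kfree_skb_any(skb);
 *		return IRQ_HANDLED;
 *	}
 */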

/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queues'
 * number to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
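
/*
 * Worked example (illustrative numbers): with num_tc set and a traffic
 * class covering queues 8..11 (offset 8, count 4), reciprocal_scale()
 * maps the 32-bit flow hash into [0, 4) as (u64)hash * 4 >> 32, so a
 * hash of 0x80000000 yields index 2 and the function returns queue 10.
 */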

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity: the checksum could be wrong
	 * otherwise.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
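
/*
 * Worked example (illustrative): for a CHECKSUM_PARTIAL TCP/IPv4 skb,
 * csum_start points at the TCP header and csum_offset is 16 (the offset
 * of the checksum field within struct tcphdr), so the folded checksum
 * over [csum_start, skb->len) is written at csum_start + 16 - exactly
 * what a NIC with NETIF_F_HW_CSUM would have done in hardware.
 */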

int skb_crc32c_csum_help(struct sk_buff *skb)
{
	__le32 crc32c_csum;
	int ret = 0, offset, start;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	if (unlikely(skb_is_gso(skb)))
		goto out;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity: the checksum could be wrong
	 * otherwise.
	 */
	if (unlikely(skb_has_shared_frag(skb))) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}
	start = skb_checksum_start_offset(skb);
	offset = start + offsetof(struct sctphdr, checksum);
	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
		ret = -EINVAL;
		goto out;
	}
	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__le32))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}
	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
						  skb->len - start, ~(__u32)0,
						  crc32c_csum_stub));
	*(__le32 *)(skb->data + offset) = crc32c_csum;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;
out:
	return ret;
}

__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);


/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
		       skb->ip_summed != CHECKSUM_UNNECESSARY;

	return skb->ip_summed == CHECKSUM_NONE;
}

/**
 * __skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @tx_path: whether it is called in TX path
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation. This is
 * only possible when GSO is used for verifying header integrity.
 *
 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
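
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * walking the segment list returned by skb_gso_segment(). On success the
 * caller owns and typically consumes the original skb, as
 * validate_xmit_skb() does later in this file.
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);
 *		for (skb = segs; skb; skb = skb->next)
 *			;	/- hand each segment to the device -/
 *	}
 */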

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. An IOMMU is present and allows mapping all of memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));

			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
	if (illegal_highdma(skb->dev, skb))
		features &= ~NETIF_F_SG;

	return features;
}

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);

static netdev_features_t dflt_features_check(const struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}

static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs)
		return features & ~NETIF_F_GSO_MASK;

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);

static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}

struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}

static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}

int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{
	if (unlikely(skb->csum_not_inet))
		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
			skb_crc32c_csum_help(skb);

	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);

static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (skb_csum_hwoffload_help(skb, features))
				goto out_kfree_skb;
		}
	}

	skb = validate_xmit_xfrm(skb, features, again);

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	atomic_long_inc(&dev->tx_dropped);
	return NULL;
}

struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev, again);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
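
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * preparing a bulk-dequeued chain before handing it to
 * dev_hard_start_xmit(), in the spirit of what the qdisc dequeue path
 * in net/sched/sch_generic.c does.
 *
 *	skb = validate_xmit_skb_list(skb, dev, &again);
 *	if (skb) {
 *		HARD_TX_LOCK(dev, txq, smp_processor_id());
 *		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 *		HARD_TX_UNLOCK(dev, txq);
 *	}
 */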

static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			const struct tcphdr *th;
			struct tcphdr _tcphdr;

			th = skb_header_pointer(skb, skb_transport_offset(skb),
						sizeof(_tcphdr), &_tcphdr);
			if (likely(th))
				hdr_len += __tcp_hdrlen(th);
		} else {
			struct udphdr _udphdr;

			if (skb_header_pointer(skb, skb_transport_offset(skb),
					       sizeof(_udphdr), &_udphdr))
				hdr_len += sizeof(struct udphdr);
		}

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}

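/*
 * Worked example (illustrative numbers): a TCPv4 GSO skb with
 * skb->len = 4394 (54 bytes of Ethernet+IP+TCP headers plus 4340 bytes
 * of payload), gso_size = 1448 and gso_segs = 3 yields
 * pkt_len = 4394 + (3 - 1) * 54 = 4502, i.e. the bytes the three
 * resulting segments will actually occupy on the wire.
 */
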
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);

	if (q->flags & TCQ_F_NOLOCK) {
		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			__qdisc_drop(skb, &to_free);
			rc = NET_XMIT_DROP;
		} else {
			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
			__qdisc_run(q);
		}

		if (unlikely(to_free))
			kfree_skb_list(to_free);
		return rc;
	}

	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		__qdisc_drop(skb, &to_free);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}

		qdisc_run_end(q);
		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
			qdisc_run_end(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(to_free))
		kfree_skb_list(to_free);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	const struct netprio_map *map;
	const struct sock *sk;
	unsigned int prioidx;

	if (skb->priority)
		return;
	map = rcu_dereference_bh(skb->dev->priomap);
	if (!map)
		return;
	sk = skb_to_full_sk(skb);
	if (!sk)
		return;

	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);

	if (prioidx < map->priomap_len)
		skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
#endif

DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_SYMBOL(xmit_recursion);

/**
 * dev_loopback_xmit - loop back @skb
 * @net: network namespace this loopback is happening in
 * @sk: sk needed to be a netfilter okfn
 * @skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);

#ifdef CONFIG_NET_EGRESS
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
	struct tcf_result cl_res;

	if (!miniq)
		return skb;

	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
	mini_qdisc_bstats_cpu_update(miniq, skb);

	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		mini_qdisc_qstats_cpu_drop(miniq);
		*ret = NET_XMIT_DROP;
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*ret = NET_XMIT_SUCCESS;
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	default:
		break;
	}

	return skb;
}
#endif /* CONFIG_NET_EGRESS */
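
/*
 * Illustrative note: the egress hook above is typically exercised by
 * attaching a clsact qdisc from userspace, e.g. with iproute2 commands
 * along these lines (device name is an example):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 egress matchall action drop
 *
 * which would make sch_handle_egress() return NULL with TC_ACT_SHOT
 * for every packet queued for transmission on eth0.
 */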

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		unsigned int tci = skb->sender_cpu - 1;

		if (dev->num_tc) {
			tci *= dev->num_tc;
			tci += netdev_get_prio_tc_map(dev, skb->priority);
		}

		map = rcu_dereference(dev_maps->cpu_map[tci]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
									   map->len)];
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

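/*
 * Worked example (illustrative numbers): with 2 traffic classes and the
 * sender being CPU 3 (skb->sender_cpu == 4), tci starts as 3, becomes
 * 3 * 2 = 6, and a priority mapping to TC 1 gives tci = 7 - i.e. the
 * per-CPU XPS maps are laid out as num_tc consecutive slots per CPU.
 */
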
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	u32 sender_cpu = skb->sender_cpu - 1;

	if (sender_cpu >= (u32)NR_CPUS)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

/**
 * __dev_queue_xmit - transmit a buffer
 * @skb: buffer to transmit
 * @accel_priv: private data used for L2 forwarding offload
 *
 * Queue a buffer for transmission to a network device. The caller must
 * have set the device and priority and built the buffer before calling
 * this function. The function can be called from an interrupt.
 *
 * A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 * I notice this method can also return errors from the queue disciplines,
 * including NET_XMIT_DROP, which is a positive value. So, errors can also
 * be positive.
 *
 * Regardless of the return value, the skb is consumed, so it is currently
 * difficult to retry a send to this method. (You can bump the ref count
 * before sending to hold a reference for retry if you are careful.)
 *
 * When calling this method, interrupts MUST be enabled. This is because
 * the BH enable code must have IRQs enabled so that it will not deadlock.
 * --BLG
 */
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;
	bool again = false;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_at_ingress = 0;
# ifdef CONFIG_NET_EGRESS
	if (static_key_false(&egress_needed)) {
		skb = sch_handle_egress(skb, &rc, dev);
		if (!skb)
			goto out;
	}
# endif
#endif
	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	txq = netdev_pick_tx(dev, skb, accel_priv);
	q = rcu_dereference_bh(txq->qdisc);

	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...

	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.

	 * Check this and take the lock; it is not prone to deadlocks.
	 * The noqueue qdisc case is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {
			if (unlikely(__this_cpu_read(xmit_recursion) >
				     XMIT_RECURSION_LIMIT))
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev, &again);
			if (!skb)
				goto out;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);

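/*
 * Illustrative sketch (hypothetical tunnel/virtual-device code, not part
 * of this file): a typical software transmit path sets skb->dev and then
 * hands the buffer off; the skb is consumed regardless of the outcome.
 * net_xmit_eval() folds positive congestion codes down to 0.
 *
 *	skb->dev = lower_dev;
 *	err = dev_queue_xmit(skb);
 *	if (net_xmit_eval(err))
 *		this_cpu_inc(stats->tx_dropped);	/- hypothetical counter -/
 */
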
/*************************************************************************
 *			Receiver routines
 *************************************************************************/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
unsigned int __read_mostly netdev_budget_usecs = 2000;
int weight_p __read_mostly = 64;           /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);

struct static_key rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key rfs_needed __read_mostly;
EXPORT_SYMBOL(rfs_needed);

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

Tom Herbert0a9627f2010-03-16 08:03:29 +00003707/*
3708 * get_rps_cpu is called from netif_receive_skb and returns the target
3709 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003710 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00003711 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003712static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3713 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003714{
Eric Dumazet567e4b72015-02-06 12:59:01 -08003715 const struct rps_sock_flow_table *sock_flow_table;
3716 struct netdev_rx_queue *rxqueue = dev->_rx;
Tom Herbertfec5e652010-04-16 16:01:27 -07003717 struct rps_dev_flow_table *flow_table;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003718 struct rps_map *map;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003719 int cpu = -1;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003720 u32 tcpu;
Tom Herbert61b905d2014-03-24 15:34:47 -07003721 u32 hash;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003722
Tom Herbert0a9627f2010-03-16 08:03:29 +00003723 if (skb_rx_queue_recorded(skb)) {
3724 u16 index = skb_get_rx_queue(skb);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003725
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003726 if (unlikely(index >= dev->real_num_rx_queues)) {
3727 WARN_ONCE(dev->real_num_rx_queues > 1,
3728 "%s received packet on queue %u, but number "
3729 "of RX queues is %u\n",
3730 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003731 goto done;
3732 }
Eric Dumazet567e4b72015-02-06 12:59:01 -08003733 rxqueue += index;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003734 }
3735
Eric Dumazet567e4b72015-02-06 12:59:01 -08003736 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3737
3738 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3739 map = rcu_dereference(rxqueue->rps_map);
3740 if (!flow_table && !map)
3741 goto done;
3742
Changli Gao2d47b452010-08-17 19:00:56 +00003743 skb_reset_network_header(skb);
Tom Herbert61b905d2014-03-24 15:34:47 -07003744 hash = skb_get_hash(skb);
3745 if (!hash)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003746 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003747
Tom Herbertfec5e652010-04-16 16:01:27 -07003748 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3749 if (flow_table && sock_flow_table) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003750 struct rps_dev_flow *rflow;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003751 u32 next_cpu;
3752 u32 ident;
Tom Herbertfec5e652010-04-16 16:01:27 -07003753
Eric Dumazet567e4b72015-02-06 12:59:01 -08003754 /* First, check the global flow table for a match */
3755 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3756 if ((ident ^ hash) & ~rps_cpu_mask)
3757 goto try_rps;
3758
3759 next_cpu = ident & rps_cpu_mask;
3760
3761 /* OK, now we know there is a match,
3762 * we can look at the local (per receive queue) flow table
3763 */
Tom Herbert61b905d2014-03-24 15:34:47 -07003764 rflow = &flow_table->flows[hash & flow_table->mask];
Tom Herbertfec5e652010-04-16 16:01:27 -07003765 tcpu = rflow->cpu;
3766
Tom Herbertfec5e652010-04-16 16:01:27 -07003767 /*
3768 * If the desired CPU (where last recvmsg was done) is
3769 * different from current CPU (one in the rx-queue flow
3770 * table entry), switch if one of the following holds:
Eric Dumazeta31196b2015-04-25 09:35:24 -07003771 * - Current CPU is unset (>= nr_cpu_ids).
Tom Herbertfec5e652010-04-16 16:01:27 -07003772 * - Current CPU is offline.
3773 * - The current CPU's queue tail has advanced beyond the
3774 * last packet that was enqueued using this table entry.
3775 * This guarantees that all previous packets for the flow
3776 * have been dequeued, thus preserving in order delivery.
3777 */
3778 if (unlikely(tcpu != next_cpu) &&
Eric Dumazeta31196b2015-04-25 09:35:24 -07003779 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
Tom Herbertfec5e652010-04-16 16:01:27 -07003780 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003781 rflow->last_qtail)) >= 0)) {
3782 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003783 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003784 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003785
Eric Dumazeta31196b2015-04-25 09:35:24 -07003786 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003787 *rflowp = rflow;
3788 cpu = tcpu;
3789 goto done;
3790 }
3791 }
3792
Eric Dumazet567e4b72015-02-06 12:59:01 -08003793try_rps:
3794
Tom Herbert0a9627f2010-03-16 08:03:29 +00003795 if (map) {
Daniel Borkmann8fc54f62014-08-23 20:58:54 +02003796 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003797 if (cpu_online(tcpu)) {
3798 cpu = tcpu;
3799 goto done;
3800 }
3801 }
3802
3803done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003804 return cpu;
3805}
3806
Ben Hutchingsc4454772011-01-19 11:03:53 +00003807#ifdef CONFIG_RFS_ACCEL
3808
3809/**
3810 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3811 * @dev: Device on which the filter was set
3812 * @rxq_index: RX queue index
3813 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3814 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3815 *
3816 * Drivers that implement ndo_rx_flow_steer() should periodically call
3817 * this function for each installed filter and remove the filters for
3818 * which it returns %true.
3819 */
3820bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3821 u32 flow_id, u16 filter_id)
3822{
3823 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3824 struct rps_dev_flow_table *flow_table;
3825 struct rps_dev_flow *rflow;
3826 bool expire = true;
Eric Dumazeta31196b2015-04-25 09:35:24 -07003827 unsigned int cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003828
3829 rcu_read_lock();
3830 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3831 if (flow_table && flow_id <= flow_table->mask) {
3832 rflow = &flow_table->flows[flow_id];
Mark Rutland6aa7de02017-10-23 14:07:29 -07003833 cpu = READ_ONCE(rflow->cpu);
Eric Dumazeta31196b2015-04-25 09:35:24 -07003834 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
Ben Hutchingsc4454772011-01-19 11:03:53 +00003835 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3836 rflow->last_qtail) <
3837 (int)(10 * flow_table->mask)))
3838 expire = false;
3839 }
3840 rcu_read_unlock();
3841 return expire;
3842}
3843EXPORT_SYMBOL(rps_may_expire_flow);
3844
3845#endif /* CONFIG_RFS_ACCEL */
3846
Tom Herbert0a9627f2010-03-16 08:03:29 +00003847/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003848static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003849{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003850 struct softnet_data *sd = data;
3851
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003852 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003853 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003854}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003855
Tom Herbertfec5e652010-04-16 16:01:27 -07003856#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003857
3858/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003859 * Check if this softnet_data structure belongs to another CPU.
3860 * If yes, queue it to our IPI list and return 1.
3861 * If no, return 0.
3862 */
3863static int rps_ipi_queued(struct softnet_data *sd)
3864{
3865#ifdef CONFIG_RPS
Christoph Lameter903ceff2014-08-17 12:30:35 -05003866 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003867
3868 if (sd != mysd) {
3869 sd->rps_ipi_next = mysd->rps_ipi_list;
3870 mysd->rps_ipi_list = sd;
3871
3872 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3873 return 1;
3874 }
3875#endif /* CONFIG_RPS */
3876 return 0;
3877}
3878
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003879#ifdef CONFIG_NET_FLOW_LIMIT
3880int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3881#endif
3882
3883static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3884{
3885#ifdef CONFIG_NET_FLOW_LIMIT
3886 struct sd_flow_limit *fl;
3887 struct softnet_data *sd;
3888 unsigned int old_flow, new_flow;
3889
3890 if (qlen < (netdev_max_backlog >> 1))
3891 return false;
3892
Christoph Lameter903ceff2014-08-17 12:30:35 -05003893 sd = this_cpu_ptr(&softnet_data);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003894
3895 rcu_read_lock();
3896 fl = rcu_dereference(sd->flow_limit);
3897 if (fl) {
Tom Herbert3958afa1b2013-12-15 22:12:06 -08003898 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003899 old_flow = fl->history[fl->history_head];
3900 fl->history[fl->history_head] = new_flow;
3901
3902 fl->history_head++;
3903 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3904
3905 if (likely(fl->buckets[old_flow]))
3906 fl->buckets[old_flow]--;
3907
3908 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3909 fl->count++;
3910 rcu_read_unlock();
3911 return true;
3912 }
3913 }
3914 rcu_read_unlock();
3915#endif
3916 return false;
3917}
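/* Illustrative note (a sketch, assuming FLOW_LIMIT_HISTORY == 128):
 * fl->history is a sliding window over the last 128 enqueued packets
 * and fl->buckets[] counts how many of those belong to each hashed
 * flow.  Once the backlog is more than half full, a packet is dropped
 * when its flow alone accounts for more than half the window (more
 * than 64 of the last 128 packets), so one elephant flow cannot
 * monopolise the remaining backlog space.
 */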
3918
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003919/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003920 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3921 * queue (may be a remote CPU queue).
3922 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003923static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3924 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003925{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003926 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003927 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003928 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003929
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003930 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003931
3932 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003933
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003934 rps_lock(sd);
Julian Anastasove9e4dd32015-07-09 09:59:09 +03003935 if (!netif_running(skb->dev))
3936 goto drop;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003937 qlen = skb_queue_len(&sd->input_pkt_queue);
3938 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Li RongQinge008f3f2014-12-08 09:42:55 +08003939 if (qlen) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003940enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003941 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003942 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003943 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003944 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003945 return NET_RX_SUCCESS;
3946 }
3947
Eric Dumazetebda37c22010-05-06 23:51:21 +00003948 /* Schedule NAPI for the backlog device.
3949 * We can use a non-atomic operation since we own the queue lock.
3950 */
3951 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003952 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003953 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003954 }
3955 goto enqueue;
3956 }
3957
Julian Anastasove9e4dd32015-07-09 09:59:09 +03003958drop:
Changli Gaodee42872010-05-02 05:42:16 +00003959 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003960 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003961
Tom Herbert0a9627f2010-03-16 08:03:29 +00003962 local_irq_restore(flags);
3963
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003964 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003965 kfree_skb(skb);
3966 return NET_RX_DROP;
3967}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01003969static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
3970{
3971 struct net_device *dev = skb->dev;
3972 struct netdev_rx_queue *rxqueue;
3973
3974 rxqueue = dev->_rx;
3975
3976 if (skb_rx_queue_recorded(skb)) {
3977 u16 index = skb_get_rx_queue(skb);
3978
3979 if (unlikely(index >= dev->real_num_rx_queues)) {
3980 WARN_ONCE(dev->real_num_rx_queues > 1,
3981 "%s received packet on queue %u, but number "
3982 "of RX queues is %u\n",
3983 dev->name, index, dev->real_num_rx_queues);
3984
3985 return rxqueue; /* Return first rxqueue */
3986 }
3987 rxqueue += index;
3988 }
3989 return rxqueue;
3990}
3991
John Fastabendd4455162017-07-17 09:26:45 -07003992static u32 netif_receive_generic_xdp(struct sk_buff *skb,
3993 struct bpf_prog *xdp_prog)
3994{
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01003995 struct netdev_rx_queue *rxqueue;
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02003996 u32 metalen, act = XDP_DROP;
John Fastabendd4455162017-07-17 09:26:45 -07003997 struct xdp_buff xdp;
John Fastabendd4455162017-07-17 09:26:45 -07003998 void *orig_data;
3999 int hlen, off;
4000 u32 mac_len;
4001
4002 /* Reinjected packets coming from act_mirred or similar should
4003 * not get XDP generic processing.
4004 */
4005 if (skb_cloned(skb))
4006 return XDP_PASS;
4007
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004008 /* XDP packets must be linear and must have sufficient headroom
4009 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4010 * native XDP provides, thus we need to do it here as well.
4011 */
4012 if (skb_is_nonlinear(skb) ||
4013 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4014 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4015 int troom = skb->tail + skb->data_len - skb->end;
4016
4017 /* In case we have to go down the path and also linearize,
4018 * then let's do the pskb_expand_head() work just once here.
4019 */
4020 if (pskb_expand_head(skb,
4021 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4022 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4023 goto do_drop;
Song Liu2d17d8d2017-12-14 17:17:56 -08004024 if (skb_linearize(skb))
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004025 goto do_drop;
4026 }
John Fastabendd4455162017-07-17 09:26:45 -07004027
4028 /* The XDP program wants to see the packet starting at the MAC
4029 * header.
4030 */
4031 mac_len = skb->data - skb_mac_header(skb);
4032 hlen = skb_headlen(skb) + mac_len;
4033 xdp.data = skb->data - mac_len;
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004034 xdp.data_meta = xdp.data;
John Fastabendd4455162017-07-17 09:26:45 -07004035 xdp.data_end = xdp.data + hlen;
4036 xdp.data_hard_start = skb->data - skb_headroom(skb);
4037 orig_data = xdp.data;
4038
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01004039 rxqueue = netif_get_rxqueue(skb);
4040 xdp.rxq = &rxqueue->xdp_rxq;
4041
John Fastabendd4455162017-07-17 09:26:45 -07004042 act = bpf_prog_run_xdp(xdp_prog, &xdp);
4043
4044 off = xdp.data - orig_data;
4045 if (off > 0)
4046 __skb_pull(skb, off);
4047 else if (off < 0)
4048 __skb_push(skb, -off);
Edward Cree92dd5452017-09-19 18:45:56 +01004049 skb->mac_header += off;
John Fastabendd4455162017-07-17 09:26:45 -07004050
4051 switch (act) {
John Fastabend6103aa92017-07-17 09:27:50 -07004052 case XDP_REDIRECT:
John Fastabendd4455162017-07-17 09:26:45 -07004053 case XDP_TX:
4054 __skb_push(skb, mac_len);
John Fastabendd4455162017-07-17 09:26:45 -07004055 break;
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004056 case XDP_PASS:
4057 metalen = xdp.data - xdp.data_meta;
4058 if (metalen)
4059 skb_metadata_set(skb, metalen);
4060 break;
John Fastabendd4455162017-07-17 09:26:45 -07004061 default:
4062 bpf_warn_invalid_xdp_action(act);
4063 /* fall through */
4064 case XDP_ABORTED:
4065 trace_xdp_exception(skb->dev, xdp_prog, act);
4066 /* fall through */
4067 case XDP_DROP:
4068 do_drop:
4069 kfree_skb(skb);
4070 break;
4071 }
4072
4073 return act;
4074}
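/* Illustrative note on the verdicts above: XDP_TX and XDP_REDIRECT
 * leave the skb intact with the MAC header pushed back so the caller
 * can transmit or redirect it, XDP_PASS continues up the stack with
 * whatever head/metadata adjustment the program made, and
 * XDP_ABORTED/XDP_DROP free the skb right here.
 */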
4075
4076/* When doing generic XDP we have to bypass the qdisc layer and the
4077 * network taps in order to match in-driver-XDP behavior.
4078 */
Jason Wang7c497472017-08-11 19:41:17 +08004079void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
John Fastabendd4455162017-07-17 09:26:45 -07004080{
4081 struct net_device *dev = skb->dev;
4082 struct netdev_queue *txq;
4083 bool free_skb = true;
4084 int cpu, rc;
4085
4086 txq = netdev_pick_tx(dev, skb, NULL);
4087 cpu = smp_processor_id();
4088 HARD_TX_LOCK(dev, txq, cpu);
4089 if (!netif_xmit_stopped(txq)) {
4090 rc = netdev_start_xmit(skb, dev, txq, 0);
4091 if (dev_xmit_complete(rc))
4092 free_skb = false;
4093 }
4094 HARD_TX_UNLOCK(dev, txq);
4095 if (free_skb) {
4096 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4097 kfree_skb(skb);
4098 }
4099}
Jason Wang7c497472017-08-11 19:41:17 +08004100EXPORT_SYMBOL_GPL(generic_xdp_tx);
John Fastabendd4455162017-07-17 09:26:45 -07004101
4102static struct static_key generic_xdp_needed __read_mostly;
4103
Jason Wang7c497472017-08-11 19:41:17 +08004104int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
John Fastabendd4455162017-07-17 09:26:45 -07004105{
John Fastabendd4455162017-07-17 09:26:45 -07004106 if (xdp_prog) {
4107 u32 act = netif_receive_generic_xdp(skb, xdp_prog);
John Fastabend6103aa92017-07-17 09:27:50 -07004108 int err;
John Fastabendd4455162017-07-17 09:26:45 -07004109
4110 if (act != XDP_PASS) {
John Fastabend6103aa92017-07-17 09:27:50 -07004111 switch (act) {
4112 case XDP_REDIRECT:
Jesper Dangaard Brouer2facaad2017-08-24 12:33:08 +02004113 err = xdp_do_generic_redirect(skb->dev, skb,
4114 xdp_prog);
John Fastabend6103aa92017-07-17 09:27:50 -07004115 if (err)
4116 goto out_redir;
4117 /* fallthru to submit skb */
4118 case XDP_TX:
John Fastabendd4455162017-07-17 09:26:45 -07004119 generic_xdp_tx(skb, xdp_prog);
John Fastabend6103aa92017-07-17 09:27:50 -07004120 break;
4121 }
John Fastabendd4455162017-07-17 09:26:45 -07004122 return XDP_DROP;
4123 }
4124 }
4125 return XDP_PASS;
John Fastabend6103aa92017-07-17 09:27:50 -07004126out_redir:
John Fastabend6103aa92017-07-17 09:27:50 -07004127 kfree_skb(skb);
4128 return XDP_DROP;
John Fastabendd4455162017-07-17 09:26:45 -07004129}
Jason Wang7c497472017-08-11 19:41:17 +08004130EXPORT_SYMBOL_GPL(do_xdp_generic);
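/* Illustrative sketch: besides netif_rx_internal() below, a driver that
 * injects skbs from process context (tun-style) may run the generic XDP
 * hook itself.  foo_inject_skb() is hypothetical; the locking pattern
 * mirrors the real callers.
 */
#if 0
static int foo_inject_skb(struct net_device *dev, struct sk_buff *skb)
{
	int act;

	preempt_disable();
	rcu_read_lock();
	act = do_xdp_generic(rcu_dereference(dev->xdp_prog), skb);
	rcu_read_unlock();
	preempt_enable();
	if (act != XDP_PASS)
		return NET_RX_DROP;	/* skb already consumed by XDP */
	return netif_rx(skb);
}
#endif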
John Fastabendd4455162017-07-17 09:26:45 -07004131
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004132static int netif_rx_internal(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004134 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135
Eric Dumazet588f0332011-11-15 04:12:55 +00004136 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004137
Koki Sanagicf66ba52010-08-23 18:45:02 +09004138 trace_netif_rx(skb);
John Fastabendd4455162017-07-17 09:26:45 -07004139
4140 if (static_key_false(&generic_xdp_needed)) {
John Fastabendbbbe2112017-09-08 14:00:30 -07004141 int ret;
4142
4143 preempt_disable();
4144 rcu_read_lock();
4145 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4146 rcu_read_unlock();
4147 preempt_enable();
John Fastabendd4455162017-07-17 09:26:45 -07004148
John Fastabend6103aa92017-07-17 09:27:50 -07004149 /* Consider XDP consuming the packet a success from
4150 * the netdev point of view; we do not want to count
4151 * this as an error.
4152 */
John Fastabendd4455162017-07-17 09:26:45 -07004153 if (ret != XDP_PASS)
John Fastabend6103aa92017-07-17 09:27:50 -07004154 return NET_RX_SUCCESS;
John Fastabendd4455162017-07-17 09:26:45 -07004155 }
4156
Eric Dumazetdf334542010-03-24 19:13:54 +00004157#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01004158 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07004159 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004160 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004161
Changli Gaocece1942010-08-07 20:35:43 -07004162 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004163 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07004164
4165 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004166 if (cpu < 0)
4167 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07004168
4169 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4170
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004171 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07004172 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00004173 } else
4174#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07004175 {
4176 unsigned int qtail;
tchardingf4563a72017-02-09 17:56:07 +11004177
Tom Herbertfec5e652010-04-16 16:01:27 -07004178 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4179 put_cpu();
4180 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004181 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004183
4184/**
4185 * netif_rx - post buffer to the network code
4186 * @skb: buffer to post
4187 *
4188 * This function receives a packet from a device driver and queues it for
4189 * the upper (protocol) levels to process. It always succeeds. The buffer
4190 * may be dropped during processing for congestion control or by the
4191 * protocol layers.
4192 *
4193 * return values:
4194 * NET_RX_SUCCESS (no congestion)
4195 * NET_RX_DROP (packet was dropped)
4196 *
4197 */
4198
4199int netif_rx(struct sk_buff *skb)
4200{
4201 trace_netif_rx_entry(skb);
4202
4203 return netif_rx_internal(skb);
4204}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004205EXPORT_SYMBOL(netif_rx);
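/* Illustrative sketch: a classic non-NAPI driver ends its interrupt
 * receive path in netif_rx().  foo_rx_interrupt() and the copied-in
 * frame are hypothetical; the skb helpers and netif_rx() are the real
 * API.
 */
#if 0
static void foo_rx_interrupt(struct net_device *dev, const void *data,
			     unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_put_data(skb, data, len);		/* copy frame out of the device */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* queue to the per-CPU backlog */
}
#endif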
Linus Torvalds1da177e2005-04-16 15:20:36 -07004206
4207int netif_rx_ni(struct sk_buff *skb)
4208{
4209 int err;
4210
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004211 trace_netif_rx_ni_entry(skb);
4212
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213 preempt_disable();
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004214 err = netif_rx_internal(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215 if (local_softirq_pending())
4216 do_softirq();
4217 preempt_enable();
4218
4219 return err;
4220}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221EXPORT_SYMBOL(netif_rx_ni);
4222
Emese Revfy0766f782016-06-20 20:42:34 +02004223static __latent_entropy void net_tx_action(struct softirq_action *h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224{
Christoph Lameter903ceff2014-08-17 12:30:35 -05004225 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226
4227 if (sd->completion_queue) {
4228 struct sk_buff *clist;
4229
4230 local_irq_disable();
4231 clist = sd->completion_queue;
4232 sd->completion_queue = NULL;
4233 local_irq_enable();
4234
4235 while (clist) {
4236 struct sk_buff *skb = clist;
tchardingf4563a72017-02-09 17:56:07 +11004237
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238 clist = clist->next;
4239
Reshetova, Elena63354792017-06-30 13:07:58 +03004240 WARN_ON(refcount_read(&skb->users));
Eric Dumazete6247022013-12-05 04:45:08 -08004241 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4242 trace_consume_skb(skb);
4243 else
4244 trace_kfree_skb(skb, net_tx_action);
Jesper Dangaard Brouer15fad712016-02-08 13:15:04 +01004245
4246 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4247 __kfree_skb(skb);
4248 else
4249 __kfree_skb_defer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004250 }
Jesper Dangaard Brouer15fad712016-02-08 13:15:04 +01004251
4252 __kfree_skb_flush();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253 }
4254
4255 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07004256 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257
4258 local_irq_disable();
4259 head = sd->output_queue;
4260 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00004261 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004262 local_irq_enable();
4263
4264 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07004265 struct Qdisc *q = head;
John Fastabend6b3ba912017-12-07 09:54:25 -08004266 spinlock_t *root_lock = NULL;
David S. Miller37437bb2008-07-16 02:15:04 -07004267
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268 head = head->next_sched;
4269
John Fastabend6b3ba912017-12-07 09:54:25 -08004270 if (!(q->flags & TCQ_F_NOLOCK)) {
4271 root_lock = qdisc_lock(q);
4272 spin_lock(root_lock);
4273 }
Eric Dumazet3bcb8462016-06-04 20:02:28 -07004274 /* We need to make sure head->next_sched is read
4275 * before clearing __QDISC_STATE_SCHED
4276 */
4277 smp_mb__before_atomic();
4278 clear_bit(__QDISC_STATE_SCHED, &q->state);
4279 qdisc_run(q);
John Fastabend6b3ba912017-12-07 09:54:25 -08004280 if (root_lock)
4281 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282 }
4283 }
Steffen Klassertf53c7232017-12-20 10:41:36 +01004284
4285 xfrm_dev_backlog(sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286}
4287
Javier Martinez Canillas181402a2016-09-09 08:43:15 -04004288#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
Michał Mirosławda678292009-06-05 05:35:28 +00004289/* This hook is defined here for ATM LANE */
4290int (*br_fdb_test_addr_hook)(struct net_device *dev,
4291 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07004292EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00004293#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004294
Daniel Borkmann1f211a12016-01-07 22:29:47 +01004295static inline struct sk_buff *
4296sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4297 struct net_device *orig_dev)
Herbert Xuf697c3e2007-10-14 00:38:47 -07004298{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004299#ifdef CONFIG_NET_CLS_ACT
Jiri Pirko46209402017-11-03 11:46:25 +01004300 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004301 struct tcf_result cl_res;
Eric Dumazet24824a02010-10-02 06:11:55 +00004302
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02004303 /* If there's at least one ingress present somewhere (so
4304 * we get here via the enabled static key), remaining devices
4305 * that are not configured with an ingress qdisc will bail
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004306 * out here.
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02004307 */
Jiri Pirko46209402017-11-03 11:46:25 +01004308 if (!miniq)
Daniel Borkmann45771392015-04-10 23:07:54 +02004309 return skb;
Jiri Pirko46209402017-11-03 11:46:25 +01004310
Herbert Xuf697c3e2007-10-14 00:38:47 -07004311 if (*pt_prev) {
4312 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4313 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07004314 }
4315
Florian Westphal33654952015-05-14 00:36:28 +02004316 qdisc_skb_cb(skb)->pkt_len = skb->len;
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05004317 skb->tc_at_ingress = 1;
Jiri Pirko46209402017-11-03 11:46:25 +01004318 mini_qdisc_bstats_cpu_update(miniq, skb);
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02004319
Jiri Pirko46209402017-11-03 11:46:25 +01004320 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004321 case TC_ACT_OK:
4322 case TC_ACT_RECLASSIFY:
4323 skb->tc_index = TC_H_MIN(cl_res.classid);
4324 break;
4325 case TC_ACT_SHOT:
Jiri Pirko46209402017-11-03 11:46:25 +01004326 mini_qdisc_qstats_cpu_drop(miniq);
Eric Dumazet8a3a4c62016-05-06 15:55:50 -07004327 kfree_skb(skb);
4328 return NULL;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004329 case TC_ACT_STOLEN:
4330 case TC_ACT_QUEUED:
Jiri Pirkoe25ea212017-06-06 14:12:02 +02004331 case TC_ACT_TRAP:
Eric Dumazet8a3a4c62016-05-06 15:55:50 -07004332 consume_skb(skb);
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004333 return NULL;
Alexei Starovoitov27b29f62015-09-15 23:05:43 -07004334 case TC_ACT_REDIRECT:
4335 /* skb_mac_header check was done by cls/act_bpf, so
4336 * we can safely push the L2 header back before
4337 * redirecting to another netdev
4338 */
4339 __skb_push(skb, skb->mac_len);
4340 skb_do_redirect(skb);
4341 return NULL;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004342 default:
4343 break;
Herbert Xuf697c3e2007-10-14 00:38:47 -07004344 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004345#endif /* CONFIG_NET_CLS_ACT */
Herbert Xuf697c3e2007-10-14 00:38:47 -07004346 return skb;
4347}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004349/**
Mahesh Bandewar24b27fc2016-09-01 22:18:34 -07004350 * netdev_is_rx_handler_busy - check if receive handler is registered
4351 * @dev: device to check
4352 *
4353 * Check if a receive handler is already registered for a given device.
4354 * Return true if there is one.
4355 *
4356 * The caller must hold the rtnl_mutex.
4357 */
4358bool netdev_is_rx_handler_busy(struct net_device *dev)
4359{
4360 ASSERT_RTNL();
4361 return dev && rtnl_dereference(dev->rx_handler);
4362}
4363EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4364
4365/**
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004366 * netdev_rx_handler_register - register receive handler
4367 * @dev: device to register a handler for
4368 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00004369 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004370 *
Masanari Iidae2278672014-02-18 22:54:36 +09004371 * Register a receive handler for a device. This handler will then be
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004372 * called from __netif_receive_skb. A negative errno code is returned
4373 * on a failure.
4374 *
4375 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004376 *
4377 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004378 */
4379int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00004380 rx_handler_func_t *rx_handler,
4381 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004382{
Mahesh Bandewar1b7cd002017-01-18 15:02:49 -08004383 if (netdev_is_rx_handler_busy(dev))
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004384 return -EBUSY;
4385
Paolo Abenif54262502018-03-09 10:39:24 +01004386 if (dev->priv_flags & IFF_NO_RX_HANDLER)
4387 return -EINVAL;
4388
Eric Dumazet00cfec32013-03-29 03:01:22 +00004389 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00004390 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004391 rcu_assign_pointer(dev->rx_handler, rx_handler);
4392
4393 return 0;
4394}
4395EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
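/* Illustrative sketch: an upper device (bridge/bond/macvlan-style)
 * attaches itself to a lower port like this.  foo_port and
 * foo_port_consume() are hypothetical; the registration call and the
 * RX_HANDLER_* results are the real API.
 */
#if 0
static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct foo_port *port = rcu_dereference(skb->dev->rx_handler_data);

	if (foo_port_consume(port, skb))
		return RX_HANDLER_CONSUMED;	/* skb now owned by foo */
	return RX_HANDLER_PASS;			/* continue normal delivery */
}

static int foo_attach_port(struct net_device *dev, struct foo_port *port)
{
	ASSERT_RTNL();
	return netdev_rx_handler_register(dev, foo_handle_frame, port);
}
#endif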
4396
4397/**
4398 * netdev_rx_handler_unregister - unregister receive handler
4399 * @dev: device to unregister a handler from
4400 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00004401 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004402 *
4403 * The caller must hold the rtnl_mutex.
4404 */
4405void netdev_rx_handler_unregister(struct net_device *dev)
4406{
4407
4408 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00004409 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00004410 /* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
4411 * section is guaranteed to also see a non-NULL
4412 * rx_handler_data.
4413 */
4414 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00004415 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004416}
4417EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4418
Mel Gormanb4b9e352012-07-31 16:44:26 -07004419/*
4420 * Limit the use of PFMEMALLOC reserves to those protocols that implement
4421 * the special handling of PFMEMALLOC skbs.
4422 */
4423static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4424{
4425 switch (skb->protocol) {
Joe Perches2b8837a2014-03-12 10:04:17 -07004426 case htons(ETH_P_ARP):
4427 case htons(ETH_P_IP):
4428 case htons(ETH_P_IPV6):
4429 case htons(ETH_P_8021Q):
4430 case htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07004431 return true;
4432 default:
4433 return false;
4434 }
4435}
4436
Pablo Neirae687ad62015-05-13 18:19:38 +02004437static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4438 int *ret, struct net_device *orig_dev)
4439{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004440#ifdef CONFIG_NETFILTER_INGRESS
Pablo Neirae687ad62015-05-13 18:19:38 +02004441 if (nf_hook_ingress_active(skb)) {
Aaron Conole2c1e2702016-09-21 11:35:03 -04004442 int ingress_retval;
4443
Pablo Neirae687ad62015-05-13 18:19:38 +02004444 if (*pt_prev) {
4445 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4446 *pt_prev = NULL;
4447 }
4448
Aaron Conole2c1e2702016-09-21 11:35:03 -04004449 rcu_read_lock();
4450 ingress_retval = nf_hook_ingress(skb);
4451 rcu_read_unlock();
4452 return ingress_retval;
Pablo Neirae687ad62015-05-13 18:19:38 +02004453 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004454#endif /* CONFIG_NETFILTER_INGRESS */
Pablo Neirae687ad62015-05-13 18:19:38 +02004455 return 0;
4456}
Pablo Neirae687ad62015-05-13 18:19:38 +02004457
David S. Miller9754e292013-02-14 15:57:38 -05004458static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004459{
4460 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004461 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07004462 struct net_device *orig_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004463 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004464 int ret = NET_RX_DROP;
Al Viro252e33462006-11-14 20:48:11 -08004465 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466
Eric Dumazet588f0332011-11-15 04:12:55 +00004467 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07004468
Koki Sanagicf66ba52010-08-23 18:45:02 +09004469 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08004470
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07004471 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00004472
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07004473 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00004474 if (!skb_transport_header_was_set(skb))
4475 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00004476 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477
4478 pt_prev = NULL;
4479
David S. Miller63d8ea72011-02-28 10:48:59 -08004480another_round:
David S. Millerb6858172012-07-23 16:27:54 -07004481 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08004482
4483 __this_cpu_inc(softnet_data.processed);
4484
Patrick McHardy8ad227f2013-04-19 02:04:31 +00004485 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4486 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04004487 skb = skb_vlan_untag(skb);
Jiri Pirkobcc6d472011-04-07 19:48:33 +00004488 if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03004489 goto out;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00004490 }
4491
Willem de Bruijne7246e12017-01-07 17:06:35 -05004492 if (skb_skip_tc_classify(skb))
4493 goto skip_classify;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494
David S. Miller9754e292013-02-14 15:57:38 -05004495 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07004496 goto skip_taps;
4497
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Salam Noureddine7866a622015-01-27 11:35:48 -08004499 if (pt_prev)
4500 ret = deliver_skb(skb, pt_prev, orig_dev);
4501 pt_prev = ptype;
4502 }
4503
4504 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4505 if (pt_prev)
4506 ret = deliver_skb(skb, pt_prev, orig_dev);
4507 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508 }
4509
Mel Gormanb4b9e352012-07-31 16:44:26 -07004510skip_taps:
Pablo Neira1cf519002015-05-13 18:19:37 +02004511#ifdef CONFIG_NET_INGRESS
Daniel Borkmann45771392015-04-10 23:07:54 +02004512 if (static_key_false(&ingress_needed)) {
Daniel Borkmann1f211a12016-01-07 22:29:47 +01004513 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
Daniel Borkmann45771392015-04-10 23:07:54 +02004514 if (!skb)
Julian Anastasov2c17d272015-07-09 09:59:10 +03004515 goto out;
Pablo Neirae687ad62015-05-13 18:19:38 +02004516
4517 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
Julian Anastasov2c17d272015-07-09 09:59:10 +03004518 goto out;
Daniel Borkmann45771392015-04-10 23:07:54 +02004519 }
Pablo Neira1cf519002015-05-13 18:19:37 +02004520#endif
Willem de Bruijna5135bc2017-01-07 17:06:36 -05004521 skb_reset_tc(skb);
Willem de Bruijne7246e12017-01-07 17:06:35 -05004522skip_classify:
David S. Miller9754e292013-02-14 15:57:38 -05004523 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07004524 goto drop;
4525
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004526 if (skb_vlan_tag_present(skb)) {
John Fastabend24257172011-10-10 09:16:41 +00004527 if (pt_prev) {
4528 ret = deliver_skb(skb, pt_prev, orig_dev);
4529 pt_prev = NULL;
4530 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004531 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00004532 goto another_round;
4533 else if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03004534 goto out;
John Fastabend24257172011-10-10 09:16:41 +00004535 }
4536
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004537 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004538 if (rx_handler) {
4539 if (pt_prev) {
4540 ret = deliver_skb(skb, pt_prev, orig_dev);
4541 pt_prev = NULL;
4542 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004543 switch (rx_handler(&skb)) {
4544 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00004545 ret = NET_RX_SUCCESS;
Julian Anastasov2c17d272015-07-09 09:59:10 +03004546 goto out;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004547 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08004548 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004549 case RX_HANDLER_EXACT:
4550 deliver_exact = true;
4551 case RX_HANDLER_PASS:
4552 break;
4553 default:
4554 BUG();
4555 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004556 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004557
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004558 if (unlikely(skb_vlan_tag_present(skb))) {
4559 if (skb_vlan_tag_get_id(skb))
Eric Dumazetd4b812d2013-07-18 07:19:26 -07004560 skb->pkt_type = PACKET_OTHERHOST;
4561 /* Note: we might in the future use prio bits
4562 * and set skb->priority like in vlan_do_receive().
4563 * For the time being, just ignore Priority Code Point
4564 */
4565 skb->vlan_tci = 0;
4566 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004567
Linus Torvalds1da177e2005-04-16 15:20:36 -07004568 type = skb->protocol;
Salam Noureddine7866a622015-01-27 11:35:48 -08004569
4570 /* deliver only exact match when indicated */
4571 if (likely(!deliver_exact)) {
4572 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4573 &ptype_base[ntohs(type) &
4574 PTYPE_HASH_MASK]);
4575 }
4576
4577 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4578 &orig_dev->ptype_specific);
4579
4580 if (unlikely(skb->dev != orig_dev)) {
4581 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4582 &skb->dev->ptype_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583 }
4584
4585 if (pt_prev) {
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04004586 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00004587 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00004588 else
4589 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004590 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07004591drop:
Jarod Wilson6e7333d2016-02-01 18:51:05 -05004592 if (!deliver_exact)
4593 atomic_long_inc(&skb->dev->rx_dropped);
4594 else
4595 atomic_long_inc(&skb->dev->rx_nohandler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596 kfree_skb(skb);
4597 /* Jamal, now you will not be able to escape explaining
4598 * to me how you were going to use this. :-)
4599 */
4600 ret = NET_RX_DROP;
4601 }
4602
Julian Anastasov2c17d272015-07-09 09:59:10 +03004603out:
David S. Miller9754e292013-02-14 15:57:38 -05004604 return ret;
4605}
4606
Jesper Dangaard Brouer1c601d82017-10-16 12:19:39 +02004607/**
4608 * netif_receive_skb_core - special purpose version of netif_receive_skb
4609 * @skb: buffer to process
4610 *
4611 * More direct receive version of netif_receive_skb(). It should
4612 * only be used by callers that need to skip RPS and Generic XDP.
4613 * Caller must also take care of handling if (page_is_)pfmemalloc.
4614 *
4615 * This function may only be called from softirq context and interrupts
4616 * should be enabled.
4617 *
4618 * Return values (usually ignored):
4619 * NET_RX_SUCCESS: no congestion
4620 * NET_RX_DROP: packet was dropped
4621 */
4622int netif_receive_skb_core(struct sk_buff *skb)
4623{
4624 int ret;
4625
4626 rcu_read_lock();
4627 ret = __netif_receive_skb_core(skb, false);
4628 rcu_read_unlock();
4629
4630 return ret;
4631}
4632EXPORT_SYMBOL(netif_receive_skb_core);
4633
David S. Miller9754e292013-02-14 15:57:38 -05004634static int __netif_receive_skb(struct sk_buff *skb)
4635{
4636 int ret;
4637
4638 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
Vlastimil Babkaf1083042017-05-08 15:59:53 -07004639 unsigned int noreclaim_flag;
David S. Miller9754e292013-02-14 15:57:38 -05004640
4641 /*
4642 * PFMEMALLOC skbs are special, they should
4643 * - be delivered to SOCK_MEMALLOC sockets only
4644 * - stay away from userspace
4645 * - have bounded memory usage
4646 *
4647 * Use PF_MEMALLOC as this saves us from propagating the allocation
4648 * context down to all allocation sites.
4649 */
Vlastimil Babkaf1083042017-05-08 15:59:53 -07004650 noreclaim_flag = memalloc_noreclaim_save();
David S. Miller9754e292013-02-14 15:57:38 -05004651 ret = __netif_receive_skb_core(skb, true);
Vlastimil Babkaf1083042017-05-08 15:59:53 -07004652 memalloc_noreclaim_restore(noreclaim_flag);
David S. Miller9754e292013-02-14 15:57:38 -05004653 } else
4654 ret = __netif_receive_skb_core(skb, false);
4655
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656 return ret;
4657}
Tom Herbert0a9627f2010-03-16 08:03:29 +00004658
Jakub Kicinskif4e63522017-11-03 13:56:16 -07004659static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
David S. Millerb5cdae32017-04-18 15:36:58 -04004660{
Martin KaFai Lau58038692017-06-15 17:29:09 -07004661 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
David S. Millerb5cdae32017-04-18 15:36:58 -04004662 struct bpf_prog *new = xdp->prog;
4663 int ret = 0;
4664
4665 switch (xdp->command) {
Martin KaFai Lau58038692017-06-15 17:29:09 -07004666 case XDP_SETUP_PROG:
David S. Millerb5cdae32017-04-18 15:36:58 -04004667 rcu_assign_pointer(dev->xdp_prog, new);
4668 if (old)
4669 bpf_prog_put(old);
4670
4671 if (old && !new) {
4672 static_key_slow_dec(&generic_xdp_needed);
4673 } else if (new && !old) {
4674 static_key_slow_inc(&generic_xdp_needed);
4675 dev_disable_lro(dev);
Michael Chan56f5aa72017-12-16 03:09:41 -05004676 dev_disable_gro_hw(dev);
David S. Millerb5cdae32017-04-18 15:36:58 -04004677 }
4678 break;
David S. Millerb5cdae32017-04-18 15:36:58 -04004679
4680 case XDP_QUERY_PROG:
Martin KaFai Lau58038692017-06-15 17:29:09 -07004681 xdp->prog_attached = !!old;
4682 xdp->prog_id = old ? old->aux->id : 0;
David S. Millerb5cdae32017-04-18 15:36:58 -04004683 break;
4684
4685 default:
4686 ret = -EINVAL;
4687 break;
4688 }
4689
4690 return ret;
4691}
4692
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004693static int netif_receive_skb_internal(struct sk_buff *skb)
Tom Herbert0a9627f2010-03-16 08:03:29 +00004694{
Julian Anastasov2c17d272015-07-09 09:59:10 +03004695 int ret;
4696
Eric Dumazet588f0332011-11-15 04:12:55 +00004697 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07004698
Richard Cochranc1f19b52010-07-17 08:49:36 +00004699 if (skb_defer_rx_timestamp(skb))
4700 return NET_RX_SUCCESS;
4701
David S. Millerb5cdae32017-04-18 15:36:58 -04004702 if (static_key_false(&generic_xdp_needed)) {
John Fastabendbbbe2112017-09-08 14:00:30 -07004703 int ret;
David S. Millerb5cdae32017-04-18 15:36:58 -04004704
John Fastabendbbbe2112017-09-08 14:00:30 -07004705 preempt_disable();
4706 rcu_read_lock();
4707 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4708 rcu_read_unlock();
4709 preempt_enable();
4710
4711 if (ret != XDP_PASS)
John Fastabendd4455162017-07-17 09:26:45 -07004712 return NET_RX_DROP;
David S. Millerb5cdae32017-04-18 15:36:58 -04004713 }
4714
John Fastabendbbbe2112017-09-08 14:00:30 -07004715 rcu_read_lock();
Eric Dumazetdf334542010-03-24 19:13:54 +00004716#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01004717 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07004718 struct rps_dev_flow voidflow, *rflow = &voidflow;
Julian Anastasov2c17d272015-07-09 09:59:10 +03004719 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07004720
Eric Dumazet3b098e22010-05-15 23:57:10 -07004721 if (cpu >= 0) {
4722 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4723 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00004724 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07004725 }
Tom Herbertfec5e652010-04-16 16:01:27 -07004726 }
Tom Herbert1e94d722010-03-18 17:45:44 -07004727#endif
Julian Anastasov2c17d272015-07-09 09:59:10 +03004728 ret = __netif_receive_skb(skb);
4729 rcu_read_unlock();
4730 return ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004731}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004732
4733/**
4734 * netif_receive_skb - process receive buffer from network
4735 * @skb: buffer to process
4736 *
4737 * netif_receive_skb() is the main receive data processing function.
4738 * It always succeeds. The buffer may be dropped during processing
4739 * for congestion control or by the protocol layers.
4740 *
4741 * This function may only be called from softirq context and interrupts
4742 * should be enabled.
4743 *
4744 * Return values (usually ignored):
4745 * NET_RX_SUCCESS: no congestion
4746 * NET_RX_DROP: packet was dropped
4747 */
Eric W. Biederman04eb4482015-09-15 20:04:15 -05004748int netif_receive_skb(struct sk_buff *skb)
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004749{
4750 trace_netif_receive_skb_entry(skb);
4751
4752 return netif_receive_skb_internal(skb);
4753}
Eric W. Biederman04eb4482015-09-15 20:04:15 -05004754EXPORT_SYMBOL(netif_receive_skb);
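/* Illustrative sketch: a NAPI driver's poll routine typically hands
 * each completed frame to netif_receive_skb() (or napi_gro_receive()).
 * foo_rx_next_skb() is hypothetical; the NAPI calls are the real API.
 */
#if 0
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = foo_rx_next_skb(napi))) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		netif_receive_skb(skb);	/* softirq context, IRQs enabled */
		done++;
	}
	if (done < budget)
		napi_complete_done(napi, done);	/* re-arm device interrupts */
	return done;
}
#endif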
Linus Torvalds1da177e2005-04-16 15:20:36 -07004755
Eric Dumazet41852492016-08-26 12:50:39 -07004756DEFINE_PER_CPU(struct work_struct, flush_works);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004757
4758/* Network device is going away, flush any packets still pending */
4759static void flush_backlog(struct work_struct *work)
4760{
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004761 struct sk_buff *skb, *tmp;
4762 struct softnet_data *sd;
4763
4764 local_bh_disable();
4765 sd = this_cpu_ptr(&softnet_data);
4766
4767 local_irq_disable();
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004768 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004769 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Eric Dumazet41852492016-08-26 12:50:39 -07004770 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004771 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004772 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004773 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004774 }
Changli Gao6e7676c2010-04-27 15:07:33 -07004775 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004776 rps_unlock(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004777 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004778
4779 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
Eric Dumazet41852492016-08-26 12:50:39 -07004780 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
Changli Gao6e7676c2010-04-27 15:07:33 -07004781 __skb_unlink(skb, &sd->process_queue);
4782 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004783 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004784 }
4785 }
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004786 local_bh_enable();
4787}
4788
Eric Dumazet41852492016-08-26 12:50:39 -07004789static void flush_all_backlogs(void)
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004790{
4791 unsigned int cpu;
4792
4793 get_online_cpus();
4794
Eric Dumazet41852492016-08-26 12:50:39 -07004795 for_each_online_cpu(cpu)
4796 queue_work_on(cpu, system_highpri_wq,
4797 per_cpu_ptr(&flush_works, cpu));
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004798
4799 for_each_online_cpu(cpu)
Eric Dumazet41852492016-08-26 12:50:39 -07004800 flush_work(per_cpu_ptr(&flush_works, cpu));
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004801
4802 put_online_cpus();
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004803}
4804
Herbert Xud565b0a2008-12-15 23:38:52 -08004805static int napi_gro_complete(struct sk_buff *skb)
4806{
Vlad Yasevich22061d82012-11-15 08:49:11 +00004807 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004808 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004809 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08004810 int err = -ENOENT;
4811
Eric Dumazetc3c7c252012-12-06 13:54:59 +00004812 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4813
Herbert Xufc59f9a2009-04-14 15:11:06 -07004814 if (NAPI_GRO_CB(skb)->count == 1) {
4815 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004816 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07004817 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004818
4819 rcu_read_lock();
4820 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004821 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08004822 continue;
4823
Jerry Chu299603e82013-12-11 20:53:45 -08004824 err = ptype->callbacks.gro_complete(skb, 0);
Herbert Xud565b0a2008-12-15 23:38:52 -08004825 break;
4826 }
4827 rcu_read_unlock();
4828
4829 if (err) {
4830 WARN_ON(&ptype->list == head);
4831 kfree_skb(skb);
4832 return NET_RX_SUCCESS;
4833 }
4834
4835out:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004836 return netif_receive_skb_internal(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004837}
4838
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004839/* napi->gro_list contains packets ordered by age.
4840 * The youngest packets are at its head.
4841 * Complete skbs in reverse order to reduce latencies.
4842 */
4843void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08004844{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004845 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004846
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004847 /* scan list and build reverse chain */
4848 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4849 skb->prev = prev;
4850 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08004851 }
4852
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004853 for (skb = prev; skb; skb = prev) {
4854 skb->next = NULL;
4855
4856 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4857 return;
4858
4859 prev = skb->prev;
4860 napi_gro_complete(skb);
4861 napi->gro_count--;
4862 }
4863
Herbert Xud565b0a2008-12-15 23:38:52 -08004864 napi->gro_list = NULL;
4865}
Eric Dumazet86cac582010-08-31 18:25:32 +00004866EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08004867
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004868static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4869{
4870 struct sk_buff *p;
4871 unsigned int maclen = skb->dev->hard_header_len;
Tom Herbert0b4cec82014-01-15 08:58:06 -08004872 u32 hash = skb_get_hash_raw(skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004873
4874 for (p = napi->gro_list; p; p = p->next) {
4875 unsigned long diffs;
4876
Tom Herbert0b4cec82014-01-15 08:58:06 -08004877 NAPI_GRO_CB(p)->flush = 0;
4878
4879 if (hash != skb_get_hash_raw(p)) {
4880 NAPI_GRO_CB(p)->same_flow = 0;
4881 continue;
4882 }
4883
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004884 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4885 diffs |= p->vlan_tci ^ skb->vlan_tci;
Jesse Grossce87fc62016-01-20 17:59:49 -08004886 diffs |= skb_metadata_dst_cmp(p, skb);
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004887 diffs |= skb_metadata_differs(p, skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004888 if (maclen == ETH_HLEN)
4889 diffs |= compare_ether_header(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07004890 skb_mac_header(skb));
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004891 else if (!diffs)
4892 diffs = memcmp(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07004893 skb_mac_header(skb),
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004894 maclen);
4895 NAPI_GRO_CB(p)->same_flow = !diffs;
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004896 }
4897}
4898
Jerry Chu299603e82013-12-11 20:53:45 -08004899static void skb_gro_reset_offset(struct sk_buff *skb)
4900{
4901 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4902 const skb_frag_t *frag0 = &pinfo->frags[0];
4903
4904 NAPI_GRO_CB(skb)->data_offset = 0;
4905 NAPI_GRO_CB(skb)->frag0 = NULL;
4906 NAPI_GRO_CB(skb)->frag0_len = 0;
4907
4908 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4909 pinfo->nr_frags &&
4910 !PageHighMem(skb_frag_page(frag0))) {
4911 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
Eric Dumazet7cfd5fd2017-01-10 19:52:43 -08004912 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
4913 skb_frag_size(frag0),
4914 skb->end - skb->tail);
Herbert Xud565b0a2008-12-15 23:38:52 -08004915 }
4916}
4917
Eric Dumazeta50e2332014-03-29 21:28:21 -07004918static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4919{
4920 struct skb_shared_info *pinfo = skb_shinfo(skb);
4921
4922 BUG_ON(skb->end - skb->tail < grow);
4923
4924 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4925
4926 skb->data_len -= grow;
4927 skb->tail += grow;
4928
4929 pinfo->frags[0].page_offset += grow;
4930 skb_frag_size_sub(&pinfo->frags[0], grow);
4931
4932 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4933 skb_frag_unref(skb, 0);
4934 memmove(pinfo->frags, pinfo->frags + 1,
4935 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4936 }
4937}
4938
Rami Rosenbb728822012-11-28 21:55:25 +00004939static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08004940{
4941 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004942 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004943 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004944 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004945 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004946 enum gro_result ret;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004947 int grow;
Herbert Xud565b0a2008-12-15 23:38:52 -08004948
David S. Millerb5cdae32017-04-18 15:36:58 -04004949 if (netif_elide_gro(skb->dev))
Herbert Xud565b0a2008-12-15 23:38:52 -08004950 goto normal;
4951
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004952 gro_list_prepare(napi, skb);
4953
Herbert Xud565b0a2008-12-15 23:38:52 -08004954 rcu_read_lock();
4955 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004956 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08004957 continue;
4958
Herbert Xu86911732009-01-29 14:19:50 +00004959 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00004960 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004961 NAPI_GRO_CB(skb)->same_flow = 0;
Eric Dumazetd61d0722016-11-07 11:12:27 -08004962 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
Herbert Xu5d38a072009-01-04 16:13:40 -08004963 NAPI_GRO_CB(skb)->free = 0;
Jesse Grossfac8e0f2016-03-19 09:32:01 -07004964 NAPI_GRO_CB(skb)->encap_mark = 0;
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02004965 NAPI_GRO_CB(skb)->recursion_counter = 0;
Alexander Duycka0ca1532016-04-05 09:13:39 -07004966 NAPI_GRO_CB(skb)->is_fou = 0;
Alexander Duyck15305452016-04-10 21:44:57 -04004967 NAPI_GRO_CB(skb)->is_atomic = 1;
Tom Herbert15e23962015-02-10 16:30:31 -08004968 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004969
Tom Herbert662880f2014-08-27 21:26:56 -07004970 /* Setup for GRO checksum validation */
4971 switch (skb->ip_summed) {
4972 case CHECKSUM_COMPLETE:
4973 NAPI_GRO_CB(skb)->csum = skb->csum;
4974 NAPI_GRO_CB(skb)->csum_valid = 1;
4975 NAPI_GRO_CB(skb)->csum_cnt = 0;
4976 break;
4977 case CHECKSUM_UNNECESSARY:
4978 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4979 NAPI_GRO_CB(skb)->csum_valid = 0;
4980 break;
4981 default:
4982 NAPI_GRO_CB(skb)->csum_cnt = 0;
4983 NAPI_GRO_CB(skb)->csum_valid = 0;
4984 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004985
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004986 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004987 break;
4988 }
4989 rcu_read_unlock();
4990
4991 if (&ptype->list == head)
4992 goto normal;
4993
Steffen Klassert25393d32017-02-15 09:39:44 +01004994 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
4995 ret = GRO_CONSUMED;
4996 goto ok;
4997 }
4998
Herbert Xu0da2afd52008-12-26 14:57:42 -08004999 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005000 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08005001
Herbert Xud565b0a2008-12-15 23:38:52 -08005002 if (pp) {
5003 struct sk_buff *nskb = *pp;
5004
5005 *pp = nskb->next;
5006 nskb->next = NULL;
5007 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00005008 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08005009 }
5010
Herbert Xu0da2afd52008-12-26 14:57:42 -08005011 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08005012 goto ok;
5013
Eric Dumazet600adc12014-01-09 14:12:19 -08005014 if (NAPI_GRO_CB(skb)->flush)
Herbert Xud565b0a2008-12-15 23:38:52 -08005015 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08005016
Eric Dumazet600adc12014-01-09 14:12:19 -08005017 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
5018 struct sk_buff *nskb = napi->gro_list;
5019
5020 /* locate the end of the list to select the 'oldest' flow */
5021 while (nskb->next) {
5022 pp = &nskb->next;
5023 nskb = *pp;
5024 }
5025 *pp = NULL;
5026 nskb->next = NULL;
5027 napi_gro_complete(nskb);
5028 } else {
5029 napi->gro_count++;
5030 }
Herbert Xud565b0a2008-12-15 23:38:52 -08005031 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00005032 NAPI_GRO_CB(skb)->age = jiffies;
Eric Dumazet29e98242014-05-16 11:34:37 -07005033 NAPI_GRO_CB(skb)->last = skb;
Herbert Xu86911732009-01-29 14:19:50 +00005034 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08005035 skb->next = napi->gro_list;
5036 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005037 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08005038
Herbert Xuad0f9902009-02-01 01:24:55 -08005039pull:
Eric Dumazeta50e2332014-03-29 21:28:21 -07005040 grow = skb_gro_offset(skb) - skb_headlen(skb);
5041 if (grow > 0)
5042 gro_pull_from_frag0(skb, grow);
Herbert Xud565b0a2008-12-15 23:38:52 -08005043ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005044 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08005045
5046normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08005047 ret = GRO_NORMAL;
5048 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08005049}
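
/* Editor's sketch (not part of the original file): dev_gro_receive() above
 * dispatches on skb->protocol by walking offload_base. A protocol hooks
 * into that list with dev_add_offload(); all names below are hypothetical:
 *
 *	static struct sk_buff **foo_gro_receive(struct sk_buff **head,
 *						struct sk_buff *skb)
 *	{
 *		... match flows on *head, set NAPI_GRO_CB(skb)->same_flow ...
 *	}
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),	(hypothetical EtherType)
 *		.callbacks = {
 *			.gro_receive	= foo_gro_receive,
 *			.gro_complete	= foo_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&foo_offload);
 */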
Herbert Xu96e93ea2009-01-06 10:49:34 -08005050
Jerry Chubf5a7552014-01-07 10:23:19 -08005051struct packet_offload *gro_find_receive_by_type(__be16 type)
5052{
5053 struct list_head *offload_head = &offload_base;
5054 struct packet_offload *ptype;
5055
5056 list_for_each_entry_rcu(ptype, offload_head, list) {
5057 if (ptype->type != type || !ptype->callbacks.gro_receive)
5058 continue;
5059 return ptype;
5060 }
5061 return NULL;
5062}
Or Gerlitze27a2f82014-01-20 13:59:20 +02005063EXPORT_SYMBOL(gro_find_receive_by_type);
Jerry Chubf5a7552014-01-07 10:23:19 -08005064
5065struct packet_offload *gro_find_complete_by_type(__be16 type)
5066{
5067 struct list_head *offload_head = &offload_base;
5068 struct packet_offload *ptype;
5069
5070 list_for_each_entry_rcu(ptype, offload_head, list) {
5071 if (ptype->type != type || !ptype->callbacks.gro_complete)
5072 continue;
5073 return ptype;
5074 }
5075 return NULL;
5076}
Or Gerlitze27a2f82014-01-20 13:59:20 +02005077EXPORT_SYMBOL(gro_find_complete_by_type);
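
/* Editor's sketch: encapsulation offloads typically use these lookups to
 * chain GRO into the inner protocol from their own gro_receive handler,
 * along these lines (details elided):
 *
 *	rcu_read_lock();
 *	ptype = gro_find_receive_by_type(type);	(inner protocol type)
 *	if (!ptype)
 *		goto out_unlock;
 *	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 *	...
 *	rcu_read_unlock();
 */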
Herbert Xu96e93ea2009-01-06 10:49:34 -08005078
Michal Kubečeke44699d2017-06-29 11:13:36 +02005079static void napi_skb_free_stolen_head(struct sk_buff *skb)
5080{
5081 skb_dst_drop(skb);
5082 secpath_reset(skb);
5083 kmem_cache_free(skbuff_head_cache, skb);
5084}
5085
Rami Rosenbb728822012-11-28 21:55:25 +00005086static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08005087{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005088 switch (ret) {
5089 case GRO_NORMAL:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00005090 if (netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07005091 ret = GRO_DROP;
5092 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08005093
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005094 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08005095 kfree_skb(skb);
5096 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00005097
Eric Dumazetdaa86542012-04-19 07:07:40 +00005098 case GRO_MERGED_FREE:
Michal Kubečeke44699d2017-06-29 11:13:36 +02005099 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5100 napi_skb_free_stolen_head(skb);
5101 else
Eric Dumazetd7e88832012-04-30 08:10:34 +00005102 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00005103 break;
5104
Ben Hutchings5b252f02009-10-29 07:17:09 +00005105 case GRO_HELD:
5106 case GRO_MERGED:
Steffen Klassert25393d32017-02-15 09:39:44 +01005107 case GRO_CONSUMED:
Ben Hutchings5b252f02009-10-29 07:17:09 +00005108 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08005109 }
5110
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07005111 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005112}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005113
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07005114gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005115{
Eric Dumazet93f93a42015-11-18 06:30:59 -08005116 skb_mark_napi_id(skb, napi);
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00005117 trace_napi_gro_receive_entry(skb);
Herbert Xu86911732009-01-29 14:19:50 +00005118
Eric Dumazeta50e2332014-03-29 21:28:21 -07005119 skb_gro_reset_offset(skb);
5120
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005121 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08005122}
5123EXPORT_SYMBOL(napi_gro_receive);
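
/* Editor's sketch: a typical driver rx completion path hands each frame
 * to GRO from its NAPI poll routine; 'priv' and the buffer handling are
 * hypothetical:
 *
 *	skb = build_skb(page_address(page), truesize);
 *	skb_put(skb, len);
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	napi_gro_receive(&priv->napi, skb);
 */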
5124
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00005125static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08005126{
Eric Dumazet93a35f52014-10-23 06:30:30 -07005127 if (unlikely(skb->pfmemalloc)) {
5128 consume_skb(skb);
5129 return;
5130 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08005131 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00005132 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
5133 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00005134 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08005135 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08005136 skb->skb_iif = 0;
Jerry Chuc3caf112014-07-14 15:54:46 -07005137 skb->encapsulation = 0;
5138 skb_shinfo(skb)->gso_type = 0;
Eric Dumazete33d0ba2014-04-03 09:28:10 -07005139 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
Steffen Klassertf991bb92017-01-30 06:45:38 +01005140 secpath_reset(skb);
Herbert Xu96e93ea2009-01-06 10:49:34 -08005141
5142 napi->skb = skb;
5143}
Herbert Xu96e93ea2009-01-06 10:49:34 -08005144
Herbert Xu76620aa2009-04-16 02:02:07 -07005145struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08005146{
Herbert Xu5d38a072009-01-04 16:13:40 -08005147 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08005148
5149 if (!skb) {
Alexander Duyckfd11a832014-12-09 19:40:49 -08005150 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
Eric Dumazete2f9dc32015-11-19 12:11:23 -08005151 if (skb) {
5152 napi->skb = skb;
5153 skb_mark_napi_id(skb, napi);
5154 }
Herbert Xu5d38a072009-01-04 16:13:40 -08005155 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08005156 return skb;
5157}
Herbert Xu76620aa2009-04-16 02:02:07 -07005158EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08005159
Eric Dumazeta50e2332014-03-29 21:28:21 -07005160static gro_result_t napi_frags_finish(struct napi_struct *napi,
5161 struct sk_buff *skb,
5162 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005163{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005164 switch (ret) {
5165 case GRO_NORMAL:
Eric Dumazeta50e2332014-03-29 21:28:21 -07005166 case GRO_HELD:
5167 __skb_push(skb, ETH_HLEN);
5168 skb->protocol = eth_type_trans(skb, skb->dev);
5169 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07005170 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00005171 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005172
5173 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005174 napi_reuse_skb(napi, skb);
5175 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00005176
Michal Kubečeke44699d2017-06-29 11:13:36 +02005177 case GRO_MERGED_FREE:
5178 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5179 napi_skb_free_stolen_head(skb);
5180 else
5181 napi_reuse_skb(napi, skb);
5182 break;
5183
Ben Hutchings5b252f02009-10-29 07:17:09 +00005184 case GRO_MERGED:
Steffen Klassert25393d32017-02-15 09:39:44 +01005185 case GRO_CONSUMED:
Ben Hutchings5b252f02009-10-29 07:17:09 +00005186 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005187 }
5188
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07005189 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005190}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005191
Eric Dumazeta50e2332014-03-29 21:28:21 -07005192/* The upper GRO stack assumes the network header starts at gro_offset=0.
 5193 * Drivers may call both napi_gro_frags() and napi_gro_receive(), so we
 5194 * copy the Ethernet header into skb->data to have a common layout.
5195 */
Eric Dumazet4adb9c42012-05-18 20:49:06 +00005196static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08005197{
Herbert Xu76620aa2009-04-16 02:02:07 -07005198 struct sk_buff *skb = napi->skb;
Eric Dumazeta50e2332014-03-29 21:28:21 -07005199 const struct ethhdr *eth;
5200 unsigned int hlen = sizeof(*eth);
Herbert Xu76620aa2009-04-16 02:02:07 -07005201
5202 napi->skb = NULL;
5203
Eric Dumazeta50e2332014-03-29 21:28:21 -07005204 skb_reset_mac_header(skb);
5205 skb_gro_reset_offset(skb);
5206
5207 eth = skb_gro_header_fast(skb, 0);
5208 if (unlikely(skb_gro_header_hard(skb, hlen))) {
5209 eth = skb_gro_header_slow(skb, hlen, 0);
5210 if (unlikely(!eth)) {
Aaron Conole4da46ce2016-04-02 15:26:43 -04005211 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
5212 __func__, napi->dev->name);
Eric Dumazeta50e2332014-03-29 21:28:21 -07005213 napi_reuse_skb(napi, skb);
5214 return NULL;
5215 }
5216 } else {
5217 gro_pull_from_frag0(skb, hlen);
5218 NAPI_GRO_CB(skb)->frag0 += hlen;
5219 NAPI_GRO_CB(skb)->frag0_len -= hlen;
Herbert Xu76620aa2009-04-16 02:02:07 -07005220 }
Eric Dumazeta50e2332014-03-29 21:28:21 -07005221 __skb_pull(skb, hlen);
5222
5223 /*
5224 * This works because the only protocols we care about don't require
5225 * special handling.
5226 * We'll fix it up properly in napi_frags_finish()
5227 */
5228 skb->protocol = eth->h_proto;
Herbert Xu76620aa2009-04-16 02:02:07 -07005229
Herbert Xu76620aa2009-04-16 02:02:07 -07005230 return skb;
5231}
Herbert Xu76620aa2009-04-16 02:02:07 -07005232
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07005233gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07005234{
5235 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08005236
5237 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07005238 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08005239
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00005240 trace_napi_gro_frags_entry(skb);
5241
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005242 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08005243}
5244EXPORT_SYMBOL(napi_gro_frags);
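
/* Editor's sketch: drivers that receive into page fragments use the
 * napi_get_frags()/napi_gro_frags() pair instead; napi_frags_skb() above
 * then pulls the Ethernet header. Accounting details are driver-specific:
 *
 *	skb = napi_get_frags(&priv->napi);
 *	if (unlikely(!skb))
 *		goto drop;
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += truesize;
 *	napi_gro_frags(&priv->napi);
 */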
5245
Tom Herbert573e8fc2014-08-22 13:33:47 -07005246/* Compute the checksum from gro_offset and return the folded value
5247 * after adding in any pseudo checksum.
5248 */
5249__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
5250{
5251 __wsum wsum;
5252 __sum16 sum;
5253
5254 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
5255
5256 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
5257 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
5258 if (likely(!sum)) {
5259 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
5260 !skb->csum_complete_sw)
5261 netdev_rx_csum_fault(skb->dev);
5262 }
5263
5264 NAPI_GRO_CB(skb)->csum = wsum;
5265 NAPI_GRO_CB(skb)->csum_valid = 1;
5266
5267 return sum;
5268}
5269EXPORT_SYMBOL(__skb_gro_checksum_complete);
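
/* Editor's sketch: protocol gro_receive handlers rarely call this
 * directly; they use the skb_gro_checksum_validate() helpers, which fall
 * back to __skb_gro_checksum_complete() when no pre-validated checksum
 * is available, e.g. (assuming the TCP/IPv4 offload pattern):
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;	(bad checksum: do not aggregate)
 */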
5270
ashwanth@codeaurora.org773fc8f2017-06-09 14:24:58 +05305271static void net_rps_send_ipi(struct softnet_data *remsd)
5272{
5273#ifdef CONFIG_RPS
5274 while (remsd) {
5275 struct softnet_data *next = remsd->rps_ipi_next;
5276
5277 if (cpu_online(remsd->cpu))
5278 smp_call_function_single_async(remsd->cpu, &remsd->csd);
5279 remsd = next;
5280 }
5281#endif
5282}
5283
Eric Dumazete326bed2010-04-22 00:22:45 -07005284/*
Zhi Yong Wu855abcf2014-01-01 04:34:50 +08005285 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
Eric Dumazete326bed2010-04-22 00:22:45 -07005286 * Note: called with local irq disabled, but exits with local irq enabled.
5287 */
5288static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5289{
5290#ifdef CONFIG_RPS
5291 struct softnet_data *remsd = sd->rps_ipi_list;
5292
5293 if (remsd) {
5294 sd->rps_ipi_list = NULL;
5295
5296 local_irq_enable();
5297
5298 /* Send pending IPIs to kick RPS processing on remote CPUs. */
ashwanth@codeaurora.org773fc8f2017-06-09 14:24:58 +05305299 net_rps_send_ipi(remsd);
Eric Dumazete326bed2010-04-22 00:22:45 -07005300 } else
5301#endif
5302 local_irq_enable();
5303}
5304
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005305static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5306{
5307#ifdef CONFIG_RPS
5308 return sd->rps_ipi_list != NULL;
5309#else
5310 return false;
5311#endif
5312}
5313
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005314static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005315{
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07005316 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005317 bool again = true;
5318 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005319
Eric Dumazete326bed2010-04-22 00:22:45 -07005320 /* Check if we have pending IPIs; it's better to send them now
 5321 * than to wait for net_rx_action() to end.
5322 */
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005323 if (sd_has_rps_ipi_waiting(sd)) {
Eric Dumazete326bed2010-04-22 00:22:45 -07005324 local_irq_disable();
5325 net_rps_action_and_irq_enable(sd);
5326 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005327
Matthias Tafelmeier3d48b532016-12-29 21:37:21 +01005328 napi->weight = dev_rx_weight;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005329 while (again) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005330 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005331
Changli Gao6e7676c2010-04-27 15:07:33 -07005332 while ((skb = __skb_dequeue(&sd->process_queue))) {
Julian Anastasov2c17d272015-07-09 09:59:10 +03005333 rcu_read_lock();
Changli Gao6e7676c2010-04-27 15:07:33 -07005334 __netif_receive_skb(skb);
Julian Anastasov2c17d272015-07-09 09:59:10 +03005335 rcu_read_unlock();
Tom Herbert76cc8b12010-05-20 18:37:59 +00005336 input_queue_head_incr(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005337 if (++work >= quota)
Tom Herbert76cc8b12010-05-20 18:37:59 +00005338 return work;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005339
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005340 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005341
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005342 local_irq_disable();
Changli Gao6e7676c2010-04-27 15:07:33 -07005343 rps_lock(sd);
Tom Herbert11ef7a82014-06-30 09:50:40 -07005344 if (skb_queue_empty(&sd->input_pkt_queue)) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07005345 /*
5346 * Inline a custom version of __napi_complete().
5347 * Only the current cpu owns and manipulates this napi,
Tom Herbert11ef7a82014-06-30 09:50:40 -07005348 * and NAPI_STATE_SCHED is the only possible flag set
5349 * on backlog.
5350 * We can use a plain write instead of clear_bit(),
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07005351 * and we don't need an smp_mb() memory barrier.
5352 */
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07005353 napi->state = 0;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005354 again = false;
5355 } else {
5356 skb_queue_splice_tail_init(&sd->input_pkt_queue,
5357 &sd->process_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07005358 }
5359 rps_unlock(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005360 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07005361 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005362
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005363 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005364}
5365
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005366/**
5367 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005368 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005369 *
Eric Dumazetbc9ad162014-10-28 18:05:13 -07005370 * The entry's receive function will be scheduled to run.
5371 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005372 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08005373void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005374{
5375 unsigned long flags;
5376
5377 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05005378 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005379 local_irq_restore(flags);
5380}
5381EXPORT_SYMBOL(__napi_schedule);
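
/* Editor's sketch: a device hard irq handler usually masks its own rx
 * interrupts and defers all work to NAPI; names are hypothetical:
 *
 *	static irqreturn_t foo_intr(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		foo_mask_rx_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */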
5382
Eric Dumazetbc9ad162014-10-28 18:05:13 -07005383/**
Eric Dumazet39e6c822017-02-28 10:34:50 -08005384 * napi_schedule_prep - check if napi can be scheduled
5385 * @n: napi context
5386 *
5387 * Test if NAPI routine is already running, and if not mark
5388 * it as running. This is used as a condition variable to
 5389 * ensure only one NAPI poll instance runs. We also make
5390 * sure there is no pending NAPI disable.
5391 */
5392bool napi_schedule_prep(struct napi_struct *n)
5393{
5394 unsigned long val, new;
5395
5396 do {
5397 val = READ_ONCE(n->state);
5398 if (unlikely(val & NAPIF_STATE_DISABLE))
5399 return false;
5400 new = val | NAPIF_STATE_SCHED;
5401
5402 /* Set the STATE_MISSED bit if STATE_SCHED was already set; the
 5403 * divide/multiply form was suggested by Alexander Duyck, as the
 5404 * compiler emits better (branch-free) code than:
5405 * if (val & NAPIF_STATE_SCHED)
5406 * new |= NAPIF_STATE_MISSED;
5407 */
5408 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5409 NAPIF_STATE_MISSED;
5410 } while (cmpxchg(&n->state, val, new) != val);
5411
5412 return !(val & NAPIF_STATE_SCHED);
5413}
5414EXPORT_SYMBOL(napi_schedule_prep);
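
/* Editor's sketch: napi_schedule_prep() pairs with __napi_schedule();
 * the generic napi_schedule() wrapper is essentially:
 *
 *	if (napi_schedule_prep(n))
 *		__napi_schedule(n);
 */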
5415
5416/**
Eric Dumazetbc9ad162014-10-28 18:05:13 -07005417 * __napi_schedule_irqoff - schedule for receive
5418 * @n: entry to schedule
5419 *
5420 * Variant of __napi_schedule() assuming hard irqs are masked
5421 */
5422void __napi_schedule_irqoff(struct napi_struct *n)
5423{
5424 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5425}
5426EXPORT_SYMBOL(__napi_schedule_irqoff);
5427
Eric Dumazet364b6052016-11-15 10:15:13 -08005428bool napi_complete_done(struct napi_struct *n, int work_done)
Herbert Xud565b0a2008-12-15 23:38:52 -08005429{
Eric Dumazet39e6c822017-02-28 10:34:50 -08005430 unsigned long flags, val, new;
Herbert Xud565b0a2008-12-15 23:38:52 -08005431
5432 /*
Eric Dumazet217f6972016-11-15 10:15:11 -08005433 * 1) Don't let napi dequeue from the cpu poll list
5434 * just in case it's running on a different cpu.
 5435 * 2) If we are busy polling, do nothing here; we have
 5436 * the guarantee that we will be called later.
Herbert Xud565b0a2008-12-15 23:38:52 -08005437 */
Eric Dumazet217f6972016-11-15 10:15:11 -08005438 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
5439 NAPIF_STATE_IN_BUSY_POLL)))
Eric Dumazet364b6052016-11-15 10:15:13 -08005440 return false;
Herbert Xud565b0a2008-12-15 23:38:52 -08005441
Eric Dumazet3b47d302014-11-06 21:09:44 -08005442 if (n->gro_list) {
5443 unsigned long timeout = 0;
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005444
Eric Dumazet3b47d302014-11-06 21:09:44 -08005445 if (work_done)
5446 timeout = n->dev->gro_flush_timeout;
5447
5448 if (timeout)
5449 hrtimer_start(&n->timer, ns_to_ktime(timeout),
5450 HRTIMER_MODE_REL_PINNED);
5451 else
5452 napi_gro_flush(n, false);
5453 }
Eric Dumazet02c16022017-02-04 15:25:02 -08005454 if (unlikely(!list_empty(&n->poll_list))) {
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005455 /* If n->poll_list is not empty, we need to mask irqs */
5456 local_irq_save(flags);
Eric Dumazet02c16022017-02-04 15:25:02 -08005457 list_del_init(&n->poll_list);
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005458 local_irq_restore(flags);
5459 }
Eric Dumazet39e6c822017-02-28 10:34:50 -08005460
5461 do {
5462 val = READ_ONCE(n->state);
5463
5464 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
5465
5466 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
5467
5468 /* If STATE_MISSED was set, leave STATE_SCHED set,
5469 * because we will call napi->poll() one more time.
5470 * This C code was suggested by Alexander Duyck to help gcc.
5471 */
5472 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
5473 NAPIF_STATE_SCHED;
5474 } while (cmpxchg(&n->state, val, new) != val);
5475
5476 if (unlikely(val & NAPIF_STATE_MISSED)) {
5477 __napi_schedule(n);
5478 return false;
5479 }
5480
Eric Dumazet364b6052016-11-15 10:15:13 -08005481 return true;
Herbert Xud565b0a2008-12-15 23:38:52 -08005482}
Eric Dumazet3b47d302014-11-06 21:09:44 -08005483EXPORT_SYMBOL(napi_complete_done);
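
/* Editor's sketch: the canonical NAPI poll routine consumes up to
 * @budget packets and re-enables device interrupts only when
 * napi_complete_done() returns true (i.e. no NAPIF_STATE_MISSED rerun);
 * the helpers are hypothetical:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = foo_clean_rx_irq(napi, budget);
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			foo_unmask_rx_irqs(napi);
 *		return work;
 *	}
 */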
Herbert Xud565b0a2008-12-15 23:38:52 -08005484
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005485/* must be called under rcu_read_lock(), as we dont take a reference */
Eric Dumazet02d62e82015-11-18 06:30:52 -08005486static struct napi_struct *napi_by_id(unsigned int napi_id)
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005487{
5488 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
5489 struct napi_struct *napi;
5490
5491 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
5492 if (napi->napi_id == napi_id)
5493 return napi;
5494
5495 return NULL;
5496}
Eric Dumazet02d62e82015-11-18 06:30:52 -08005497
5498#if defined(CONFIG_NET_RX_BUSY_POLL)
Eric Dumazet217f6972016-11-15 10:15:11 -08005499
Eric Dumazetce6aea92015-11-18 06:30:54 -08005500#define BUSY_POLL_BUDGET 8
Eric Dumazet217f6972016-11-15 10:15:11 -08005501
5502static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
5503{
5504 int rc;
5505
Eric Dumazet39e6c822017-02-28 10:34:50 -08005506 /* Busy polling means there is a high chance the device driver's hard
 5507 * irq could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
5508 * set in napi_schedule_prep().
5509 * Since we are about to call napi->poll() once more, we can safely
5510 * clear NAPI_STATE_MISSED.
5511 *
5512 * Note: x86 could use a single "lock and ..." instruction
5513 * to perform these two clear_bit() calls.
5514 */
5515 clear_bit(NAPI_STATE_MISSED, &napi->state);
Eric Dumazet217f6972016-11-15 10:15:11 -08005516 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
5517
5518 local_bh_disable();
5519
5520 /* All we really want here is to re-enable device interrupts.
5521 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
5522 */
5523 rc = napi->poll(napi, BUSY_POLL_BUDGET);
Jesper Dangaard Brouer1e223912017-08-25 15:04:32 +02005524 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
Eric Dumazet217f6972016-11-15 10:15:11 -08005525 netpoll_poll_unlock(have_poll_lock);
5526 if (rc == BUSY_POLL_BUDGET)
5527 __napi_schedule(napi);
5528 local_bh_enable();
Eric Dumazet217f6972016-11-15 10:15:11 -08005529}
5530
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07005531void napi_busy_loop(unsigned int napi_id,
5532 bool (*loop_end)(void *, unsigned long),
5533 void *loop_end_arg)
Eric Dumazet02d62e82015-11-18 06:30:52 -08005534{
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07005535 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
Eric Dumazet217f6972016-11-15 10:15:11 -08005536 int (*napi_poll)(struct napi_struct *napi, int budget);
Eric Dumazet217f6972016-11-15 10:15:11 -08005537 void *have_poll_lock = NULL;
Eric Dumazet02d62e82015-11-18 06:30:52 -08005538 struct napi_struct *napi;
Eric Dumazet217f6972016-11-15 10:15:11 -08005539
5540restart:
Eric Dumazet217f6972016-11-15 10:15:11 -08005541 napi_poll = NULL;
Eric Dumazet02d62e82015-11-18 06:30:52 -08005542
Eric Dumazet2a028ec2015-11-18 06:30:53 -08005543 rcu_read_lock();
Eric Dumazet02d62e82015-11-18 06:30:52 -08005544
Alexander Duyck545cd5e2017-03-24 10:07:53 -07005545 napi = napi_by_id(napi_id);
Eric Dumazet02d62e82015-11-18 06:30:52 -08005546 if (!napi)
5547 goto out;
5548
Eric Dumazet217f6972016-11-15 10:15:11 -08005549 preempt_disable();
5550 for (;;) {
Alexander Duyck2b5cd0d2017-03-24 10:08:12 -07005551 int work = 0;
5552
Eric Dumazet2a028ec2015-11-18 06:30:53 -08005553 local_bh_disable();
Eric Dumazet217f6972016-11-15 10:15:11 -08005554 if (!napi_poll) {
5555 unsigned long val = READ_ONCE(napi->state);
5556
5557 /* If multiple threads are competing for this napi,
5558 * we avoid dirtying napi->state as much as we can.
5559 */
5560 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
5561 NAPIF_STATE_IN_BUSY_POLL))
5562 goto count;
5563 if (cmpxchg(&napi->state, val,
5564 val | NAPIF_STATE_IN_BUSY_POLL |
5565 NAPIF_STATE_SCHED) != val)
5566 goto count;
5567 have_poll_lock = netpoll_poll_lock(napi);
5568 napi_poll = napi->poll;
5569 }
Alexander Duyck2b5cd0d2017-03-24 10:08:12 -07005570 work = napi_poll(napi, BUSY_POLL_BUDGET);
5571 trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
Eric Dumazet217f6972016-11-15 10:15:11 -08005572count:
Alexander Duyck2b5cd0d2017-03-24 10:08:12 -07005573 if (work > 0)
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07005574 __NET_ADD_STATS(dev_net(napi->dev),
Alexander Duyck2b5cd0d2017-03-24 10:08:12 -07005575 LINUX_MIB_BUSYPOLLRXPACKETS, work);
Eric Dumazet2a028ec2015-11-18 06:30:53 -08005576 local_bh_enable();
Eric Dumazet02d62e82015-11-18 06:30:52 -08005577
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07005578 if (!loop_end || loop_end(loop_end_arg, start_time))
Eric Dumazet217f6972016-11-15 10:15:11 -08005579 break;
Eric Dumazet02d62e82015-11-18 06:30:52 -08005580
Eric Dumazet217f6972016-11-15 10:15:11 -08005581 if (unlikely(need_resched())) {
5582 if (napi_poll)
5583 busy_poll_stop(napi, have_poll_lock);
5584 preempt_enable();
5585 rcu_read_unlock();
5586 cond_resched();
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07005587 if (loop_end(loop_end_arg, start_time))
Alexander Duyck2b5cd0d2017-03-24 10:08:12 -07005588 return;
Eric Dumazet217f6972016-11-15 10:15:11 -08005589 goto restart;
5590 }
Linus Torvalds6cdf89b2016-12-12 10:48:02 -08005591 cpu_relax();
Eric Dumazet217f6972016-11-15 10:15:11 -08005592 }
5593 if (napi_poll)
5594 busy_poll_stop(napi, have_poll_lock);
5595 preempt_enable();
Eric Dumazet02d62e82015-11-18 06:30:52 -08005596out:
Eric Dumazet2a028ec2015-11-18 06:30:53 -08005597 rcu_read_unlock();
Eric Dumazet02d62e82015-11-18 06:30:52 -08005598}
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07005599EXPORT_SYMBOL(napi_busy_loop);
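
/* Editor's sketch: sockets reach napi_busy_loop() through the
 * sk_busy_loop() helper, roughly (per the 4.12-era busy_poll.h):
 *
 *	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);
 *
 *	if (napi_id >= MIN_NAPI_ID)
 *		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
 */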
Eric Dumazet02d62e82015-11-18 06:30:52 -08005600
5601#endif /* CONFIG_NET_RX_BUSY_POLL */
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005602
Eric Dumazet149d6ad2016-11-08 11:07:28 -08005603static void napi_hash_add(struct napi_struct *napi)
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005604{
Eric Dumazetd64b5e82015-11-18 06:31:00 -08005605 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
5606 test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
Eric Dumazet52bd2d62015-11-18 06:30:50 -08005607 return;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005608
Eric Dumazet52bd2d62015-11-18 06:30:50 -08005609 spin_lock(&napi_hash_lock);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005610
Alexander Duyck545cd5e2017-03-24 10:07:53 -07005611 /* 0..NR_CPUS range is reserved for sender_cpu use */
Eric Dumazet52bd2d62015-11-18 06:30:50 -08005612 do {
Alexander Duyck545cd5e2017-03-24 10:07:53 -07005613 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
5614 napi_gen_id = MIN_NAPI_ID;
Eric Dumazet52bd2d62015-11-18 06:30:50 -08005615 } while (napi_by_id(napi_gen_id));
5616 napi->napi_id = napi_gen_id;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005617
Eric Dumazet52bd2d62015-11-18 06:30:50 -08005618 hlist_add_head_rcu(&napi->napi_hash_node,
5619 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005620
Eric Dumazet52bd2d62015-11-18 06:30:50 -08005621 spin_unlock(&napi_hash_lock);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005622}
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005623
5624/* Warning: the caller is responsible for making sure an rcu grace period
 5625 * has elapsed before freeing the memory containing @napi
5626 */
Eric Dumazet34cbe272015-11-18 06:31:02 -08005627bool napi_hash_del(struct napi_struct *napi)
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005628{
Eric Dumazet34cbe272015-11-18 06:31:02 -08005629 bool rcu_sync_needed = false;
5630
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005631 spin_lock(&napi_hash_lock);
5632
Eric Dumazet34cbe272015-11-18 06:31:02 -08005633 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
5634 rcu_sync_needed = true;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005635 hlist_del_rcu(&napi->napi_hash_node);
Eric Dumazet34cbe272015-11-18 06:31:02 -08005636 }
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005637 spin_unlock(&napi_hash_lock);
Eric Dumazet34cbe272015-11-18 06:31:02 -08005638 return rcu_sync_needed;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03005639}
5640EXPORT_SYMBOL_GPL(napi_hash_del);
5641
Eric Dumazet3b47d302014-11-06 21:09:44 -08005642static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
5643{
5644 struct napi_struct *napi;
5645
5646 napi = container_of(timer, struct napi_struct, timer);
Eric Dumazet39e6c822017-02-28 10:34:50 -08005647
5648 /* Note: we use a relaxed variant of napi_schedule_prep() that does not set
5649 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
5650 */
5651 if (napi->gro_list && !napi_disable_pending(napi) &&
5652 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
5653 __napi_schedule_irqoff(napi);
Eric Dumazet3b47d302014-11-06 21:09:44 -08005654
5655 return HRTIMER_NORESTART;
5656}
5657
Herbert Xud565b0a2008-12-15 23:38:52 -08005658void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
5659 int (*poll)(struct napi_struct *, int), int weight)
5660{
5661 INIT_LIST_HEAD(&napi->poll_list);
Eric Dumazet3b47d302014-11-06 21:09:44 -08005662 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5663 napi->timer.function = napi_watchdog;
Herbert Xu4ae55442009-02-08 18:00:36 +00005664 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08005665 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08005666 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08005667 napi->poll = poll;
Eric Dumazet82dc3c63c2013-03-05 15:57:22 +00005668 if (weight > NAPI_POLL_WEIGHT)
5669 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
5670 weight, dev->name);
Herbert Xud565b0a2008-12-15 23:38:52 -08005671 napi->weight = weight;
5672 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08005673 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08005674#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08005675 napi->poll_owner = -1;
5676#endif
5677 set_bit(NAPI_STATE_SCHED, &napi->state);
Eric Dumazet93d05d42015-11-18 06:31:03 -08005678 napi_hash_add(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08005679}
5680EXPORT_SYMBOL(netif_napi_add);
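
/* Editor's sketch: drivers register their poll routine once at probe
 * time and arm it when the interface is brought up:
 *
 *	netif_napi_add(netdev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 *	...
 *	napi_enable(&priv->napi);	(typically from ndo_open)
 */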
5681
Eric Dumazet3b47d302014-11-06 21:09:44 -08005682void napi_disable(struct napi_struct *n)
5683{
5684 might_sleep();
5685 set_bit(NAPI_STATE_DISABLE, &n->state);
5686
5687 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
5688 msleep(1);
Neil Horman2d8bff1262015-09-23 14:57:58 -04005689 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
5690 msleep(1);
Eric Dumazet3b47d302014-11-06 21:09:44 -08005691
5692 hrtimer_cancel(&n->timer);
5693
5694 clear_bit(NAPI_STATE_DISABLE, &n->state);
5695}
5696EXPORT_SYMBOL(napi_disable);
5697
Eric Dumazet93d05d42015-11-18 06:31:03 -08005698/* Must be called in process context */
Herbert Xud565b0a2008-12-15 23:38:52 -08005699void netif_napi_del(struct napi_struct *napi)
5700{
Eric Dumazet93d05d42015-11-18 06:31:03 -08005701 might_sleep();
5702 if (napi_hash_del(napi))
5703 synchronize_net();
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08005704 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07005705 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08005706
Eric Dumazet289dccb2013-12-20 14:29:08 -08005707 kfree_skb_list(napi->gro_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08005708 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005709 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08005710}
5711EXPORT_SYMBOL(netif_napi_del);
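
/* Editor's sketch: teardown mirrors the above; both calls may sleep,
 * so this must run in process context:
 *
 *	napi_disable(&priv->napi);	(typically from ndo_stop)
 *	...
 *	netif_napi_del(&priv->napi);	(from the remove/free path)
 */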
5712
Herbert Xu726ce702014-12-21 07:16:21 +11005713static int napi_poll(struct napi_struct *n, struct list_head *repoll)
5714{
5715 void *have;
5716 int work, weight;
5717
5718 list_del_init(&n->poll_list);
5719
5720 have = netpoll_poll_lock(n);
5721
5722 weight = n->weight;
5723
5724 /* This NAPI_STATE_SCHED test is for avoiding a race
5725 * with netpoll's poll_napi(). Only the entity which
5726 * obtains the lock and sees NAPI_STATE_SCHED set will
5727 * actually make the ->poll() call. Therefore we avoid
5728 * accidentally calling ->poll() when NAPI is not scheduled.
5729 */
5730 work = 0;
5731 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
5732 work = n->poll(n, weight);
Jesper Dangaard Brouer1db19db2016-07-07 18:01:32 +02005733 trace_napi_poll(n, work, weight);
Herbert Xu726ce702014-12-21 07:16:21 +11005734 }
5735
5736 WARN_ON_ONCE(work > weight);
5737
5738 if (likely(work < weight))
5739 goto out_unlock;
5740
5741 /* Drivers must not modify the NAPI state if they
5742 * consume the entire weight. In such cases this code
5743 * still "owns" the NAPI instance and therefore can
5744 * move the instance around on the list at-will.
5745 */
5746 if (unlikely(napi_disable_pending(n))) {
5747 napi_complete(n);
5748 goto out_unlock;
5749 }
5750
5751 if (n->gro_list) {
5752 /* Flush packets that are too old.
5753 * If HZ < 1000, flush all packets.
5754 */
5755 napi_gro_flush(n, HZ >= 1000);
5756 }
5757
Herbert Xu001ce542014-12-21 07:16:22 +11005758 /* Some drivers may have called napi_schedule
5759 * prior to exhausting their budget.
5760 */
5761 if (unlikely(!list_empty(&n->poll_list))) {
5762 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
5763 n->dev ? n->dev->name : "backlog");
5764 goto out_unlock;
5765 }
5766
Herbert Xu726ce702014-12-21 07:16:21 +11005767 list_add_tail(&n->poll_list, repoll);
5768
5769out_unlock:
5770 netpoll_poll_unlock(have);
5771
5772 return work;
5773}
5774
Emese Revfy0766f782016-06-20 20:42:34 +02005775static __latent_entropy void net_rx_action(struct softirq_action *h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005776{
Christoph Lameter903ceff2014-08-17 12:30:35 -05005777 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Matthew Whitehead7acf8a12017-04-19 12:37:10 -04005778 unsigned long time_limit = jiffies +
5779 usecs_to_jiffies(netdev_budget_usecs);
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07005780 int budget = netdev_budget;
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005781 LIST_HEAD(list);
5782 LIST_HEAD(repoll);
Matt Mackall53fb95d2005-08-11 19:27:43 -07005783
Linus Torvalds1da177e2005-04-16 15:20:36 -07005784 local_irq_disable();
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005785 list_splice_init(&sd->poll_list, &list);
5786 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005787
Herbert Xuceb8d5b2014-12-21 07:16:25 +11005788 for (;;) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005789 struct napi_struct *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005790
Herbert Xuceb8d5b2014-12-21 07:16:25 +11005791 if (list_empty(&list)) {
5792 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
Eric Dumazetf52dffe2016-11-23 08:44:56 -08005793 goto out;
Herbert Xuceb8d5b2014-12-21 07:16:25 +11005794 break;
5795 }
5796
Herbert Xu6bd373e2014-12-21 07:16:24 +11005797 n = list_first_entry(&list, struct napi_struct, poll_list);
5798 budget -= napi_poll(n, &repoll);
5799
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005800 /* If softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08005801 * Allow this to run for 2 jiffies, since that will allow
5802 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005803 */
Herbert Xuceb8d5b2014-12-21 07:16:25 +11005804 if (unlikely(budget <= 0 ||
5805 time_after_eq(jiffies, time_limit))) {
5806 sd->time_squeeze++;
5807 break;
5808 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005809 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005810
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005811 local_irq_disable();
5812
5813 list_splice_tail_init(&sd->poll_list, &list);
5814 list_splice_tail(&repoll, &list);
5815 list_splice(&list, &sd->poll_list);
5816 if (!list_empty(&sd->poll_list))
5817 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
5818
Eric Dumazete326bed2010-04-22 00:22:45 -07005819 net_rps_action_and_irq_enable(sd);
Eric Dumazetf52dffe2016-11-23 08:44:56 -08005820out:
5821 __kfree_skb_flush();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005822}
5823
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02005824struct netdev_adjacent {
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005825 struct net_device *dev;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005826
5827 /* upper master flag, there can only be one master device per list */
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005828 bool master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005829
Veaceslav Falico5d261912013-08-28 23:25:05 +02005830 /* counter for the number of times this device was added to us */
5831 u16 ref_nr;
5832
Veaceslav Falico402dae92013-09-25 09:20:09 +02005833 /* private field for the users */
5834 void *private;
5835
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005836 struct list_head list;
5837 struct rcu_head rcu;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005838};
5839
Michal Kubeček6ea29da2015-09-24 10:59:05 +02005840static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005841 struct list_head *adj_list)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005842{
Veaceslav Falico5d261912013-08-28 23:25:05 +02005843 struct netdev_adjacent *adj;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005844
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005845 list_for_each_entry(adj, adj_list, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005846 if (adj->dev == adj_dev)
5847 return adj;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005848 }
5849 return NULL;
5850}
5851
David Ahernf1170fd2016-10-17 19:15:51 -07005852static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
5853{
5854 struct net_device *dev = data;
5855
5856 return upper_dev == dev;
5857}
5858
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005859/**
5860 * netdev_has_upper_dev - Check if device is linked to an upper device
5861 * @dev: device
5862 * @upper_dev: upper device to check
5863 *
5864 * Find out if a device is linked to the specified upper device and return
 5865 * true if it is. Note that this checks only the immediate upper device,
 5866 * not the complete stack of devices. The caller must hold the RTNL lock.
5867 */
5868bool netdev_has_upper_dev(struct net_device *dev,
5869 struct net_device *upper_dev)
5870{
5871 ASSERT_RTNL();
5872
David Ahernf1170fd2016-10-17 19:15:51 -07005873 return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
5874 upper_dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005875}
5876EXPORT_SYMBOL(netdev_has_upper_dev);
5877
5878/**
David Ahern1a3f0602016-10-17 19:15:44 -07005879 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
5880 * @dev: device
5881 * @upper_dev: upper device to check
5882 *
5883 * Find out if a device is linked to the specified upper device and return
 5884 * true if it is. Note that this checks the entire upper device chain.
 5885 * The caller must hold the RCU read lock.
5886 */
5887
David Ahern1a3f0602016-10-17 19:15:44 -07005888bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
5889 struct net_device *upper_dev)
5890{
5891 return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
5892 upper_dev);
5893}
5894EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
5895
5896/**
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005897 * netdev_has_any_upper_dev - Check if device is linked to some device
5898 * @dev: device
5899 *
5900 * Find out if a device is linked to an upper device and return true in case
5901 * it is. The caller must hold the RTNL lock.
5902 */
Ido Schimmel25cc72a2017-09-01 10:52:31 +02005903bool netdev_has_any_upper_dev(struct net_device *dev)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005904{
5905 ASSERT_RTNL();
5906
David Ahernf1170fd2016-10-17 19:15:51 -07005907 return !list_empty(&dev->adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005908}
Ido Schimmel25cc72a2017-09-01 10:52:31 +02005909EXPORT_SYMBOL(netdev_has_any_upper_dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005910
5911/**
5912 * netdev_master_upper_dev_get - Get master upper device
5913 * @dev: device
5914 *
5915 * Find a master upper device and return a pointer to it, or NULL if
 5916 * there is none. The caller must hold the RTNL lock.
5917 */
5918struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
5919{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02005920 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005921
5922 ASSERT_RTNL();
5923
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005924 if (list_empty(&dev->adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005925 return NULL;
5926
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005927 upper = list_first_entry(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02005928 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005929 if (likely(upper->master))
5930 return upper->dev;
5931 return NULL;
5932}
5933EXPORT_SYMBOL(netdev_master_upper_dev_get);
5934
David Ahern0f524a82016-10-17 19:15:52 -07005935/**
5936 * netdev_has_any_lower_dev - Check if device is linked to some device
5937 * @dev: device
5938 *
5939 * Find out if a device is linked to a lower device and return true in case
5940 * it is. The caller must hold the RTNL lock.
5941 */
5942static bool netdev_has_any_lower_dev(struct net_device *dev)
5943{
5944 ASSERT_RTNL();
5945
5946 return !list_empty(&dev->adj_list.lower);
5947}
5948
Veaceslav Falicob6ccba42013-09-25 09:20:23 +02005949void *netdev_adjacent_get_private(struct list_head *adj_list)
5950{
5951 struct netdev_adjacent *adj;
5952
5953 adj = list_entry(adj_list, struct netdev_adjacent, list);
5954
5955 return adj->private;
5956}
5957EXPORT_SYMBOL(netdev_adjacent_get_private);
5958
Veaceslav Falico31088a12013-09-25 09:20:12 +02005959/**
Vlad Yasevich44a40852014-05-16 17:20:38 -04005960 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
5961 * @dev: device
5962 * @iter: list_head ** of the current position
5963 *
5964 * Gets the next device from the dev's upper list, starting from iter
5965 * position. The caller must hold RCU read lock.
5966 */
5967struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5968 struct list_head **iter)
5969{
5970 struct netdev_adjacent *upper;
5971
5972 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5973
5974 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5975
5976 if (&upper->list == &dev->adj_list.upper)
5977 return NULL;
5978
5979 *iter = &upper->list;
5980
5981 return upper->dev;
5982}
5983EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
5984
David Ahern1a3f0602016-10-17 19:15:44 -07005985static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
5986 struct list_head **iter)
5987{
5988 struct netdev_adjacent *upper;
5989
5990 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5991
5992 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5993
5994 if (&upper->list == &dev->adj_list.upper)
5995 return NULL;
5996
5997 *iter = &upper->list;
5998
5999 return upper->dev;
6000}
6001
6002int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
6003 int (*fn)(struct net_device *dev,
6004 void *data),
6005 void *data)
6006{
6007 struct net_device *udev;
6008 struct list_head *iter;
6009 int ret;
6010
6011 for (iter = &dev->adj_list.upper,
6012 udev = netdev_next_upper_dev_rcu(dev, &iter);
6013 udev;
6014 udev = netdev_next_upper_dev_rcu(dev, &iter)) {
6015 /* first is the upper device itself */
6016 ret = fn(udev, data);
6017 if (ret)
6018 return ret;
6019
6020 /* then look at all of its upper devices */
6021 ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
6022 if (ret)
6023 return ret;
6024 }
6025
6026 return 0;
6027}
6028EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
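
/* Editor's sketch: the walker recursively visits every upper device;
 * a non-zero return from @fn stops the walk early. E.g. counting upper
 * devices along every path (hypothetical helper):
 *
 *	static int foo_count_upper(struct net_device *upper, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	(keep walking)
 *	}
 *
 *	rcu_read_lock();
 *	netdev_walk_all_upper_dev_rcu(dev, foo_count_upper, &count);
 *	rcu_read_unlock();
 */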
6029
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006030/**
Veaceslav Falico31088a12013-09-25 09:20:12 +02006031 * netdev_lower_get_next_private - Get the next ->private from the
6032 * lower neighbour list
6033 * @dev: device
6034 * @iter: list_head ** of the current position
6035 *
6036 * Gets the next netdev_adjacent->private from the dev's lower neighbour
6037 * list, starting from iter position. The caller must either hold the
 6038 * RTNL lock or use its own locking that guarantees that the lower neighbour
subashab@codeaurora.orgb4691392015-07-24 03:03:29 +00006039 * list will remain unchanged.
Veaceslav Falico31088a12013-09-25 09:20:12 +02006040 */
6041void *netdev_lower_get_next_private(struct net_device *dev,
6042 struct list_head **iter)
6043{
6044 struct netdev_adjacent *lower;
6045
6046 lower = list_entry(*iter, struct netdev_adjacent, list);
6047
6048 if (&lower->list == &dev->adj_list.lower)
6049 return NULL;
6050
Veaceslav Falico6859e7d2014-04-07 11:25:12 +02006051 *iter = lower->list.next;
Veaceslav Falico31088a12013-09-25 09:20:12 +02006052
6053 return lower->private;
6054}
6055EXPORT_SYMBOL(netdev_lower_get_next_private);
6056
6057/**
6058 * netdev_lower_get_next_private_rcu - Get the next ->private from the
6059 * lower neighbour list, RCU
6060 * variant
6061 * @dev: device
6062 * @iter: list_head ** of the current position
6063 *
6064 * Gets the next netdev_adjacent->private from the dev's lower neighbour
6065 * list, starting from iter position. The caller must hold RCU read lock.
6066 */
6067void *netdev_lower_get_next_private_rcu(struct net_device *dev,
6068 struct list_head **iter)
6069{
6070 struct netdev_adjacent *lower;
6071
6072 WARN_ON_ONCE(!rcu_read_lock_held());
6073
6074 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6075
6076 if (&lower->list == &dev->adj_list.lower)
6077 return NULL;
6078
Veaceslav Falico6859e7d2014-04-07 11:25:12 +02006079 *iter = &lower->list;
Veaceslav Falico31088a12013-09-25 09:20:12 +02006080
6081 return lower->private;
6082}
6083EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
6084
6085/**
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006086 * netdev_lower_get_next - Get the next device from the lower neighbour
6087 * list
6088 * @dev: device
6089 * @iter: list_head ** of the current position
6090 *
6091 * Gets the next netdev_adjacent from the dev's lower neighbour
6092 * list, starting from iter position. The caller must either hold the RTNL
 6093 * lock or use its own locking that guarantees that the lower neighbour
subashab@codeaurora.orgb4691392015-07-24 03:03:29 +00006094 * list will remain unchanged.
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006095 */
6096void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
6097{
6098 struct netdev_adjacent *lower;
6099
Nikolay Aleksandrovcfdd28b2016-02-17 18:00:31 +01006100 lower = list_entry(*iter, struct netdev_adjacent, list);
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006101
6102 if (&lower->list == &dev->adj_list.lower)
6103 return NULL;
6104
Nikolay Aleksandrovcfdd28b2016-02-17 18:00:31 +01006105 *iter = lower->list.next;
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006106
6107 return lower->dev;
6108}
6109EXPORT_SYMBOL(netdev_lower_get_next);
6110
David Ahern1a3f0602016-10-17 19:15:44 -07006111static struct net_device *netdev_next_lower_dev(struct net_device *dev,
6112 struct list_head **iter)
6113{
6114 struct netdev_adjacent *lower;
6115
David Ahern46b5ab12016-10-26 13:21:33 -07006116 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
David Ahern1a3f0602016-10-17 19:15:44 -07006117
6118 if (&lower->list == &dev->adj_list.lower)
6119 return NULL;
6120
David Ahern46b5ab12016-10-26 13:21:33 -07006121 *iter = &lower->list;
David Ahern1a3f0602016-10-17 19:15:44 -07006122
6123 return lower->dev;
6124}
6125
6126int netdev_walk_all_lower_dev(struct net_device *dev,
6127 int (*fn)(struct net_device *dev,
6128 void *data),
6129 void *data)
6130{
6131 struct net_device *ldev;
6132 struct list_head *iter;
6133 int ret;
6134
6135 for (iter = &dev->adj_list.lower,
6136 ldev = netdev_next_lower_dev(dev, &iter);
6137 ldev;
6138 ldev = netdev_next_lower_dev(dev, &iter)) {
6139 /* first is the lower device itself */
6140 ret = fn(ldev, data);
6141 if (ret)
6142 return ret;
6143
6144 /* then look at all of its lower devices */
6145 ret = netdev_walk_all_lower_dev(ldev, fn, data);
6146 if (ret)
6147 return ret;
6148 }
6149
6150 return 0;
6151}
6152EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
6153
David Ahern1a3f0602016-10-17 19:15:44 -07006154static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6155 struct list_head **iter)
6156{
6157 struct netdev_adjacent *lower;
6158
6159 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6160 if (&lower->list == &dev->adj_list.lower)
6161 return NULL;
6162
6163 *iter = &lower->list;
6164
6165 return lower->dev;
6166}
6167
6168int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
6169 int (*fn)(struct net_device *dev,
6170 void *data),
6171 void *data)
6172{
6173 struct net_device *ldev;
6174 struct list_head *iter;
6175 int ret;
6176
6177 for (iter = &dev->adj_list.lower,
6178 ldev = netdev_next_lower_dev_rcu(dev, &iter);
6179 ldev;
6180 ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
6181 /* first is the lower device itself */
6182 ret = fn(ldev, data);
6183 if (ret)
6184 return ret;
6185
6186 /* then look at all of its lower devices */
6187 ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
6188 if (ret)
6189 return ret;
6190 }
6191
6192 return 0;
6193}
6194EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
6195
Jiri Pirko7ce856a2016-07-04 08:23:12 +02006196/**
dingtianhonge001bfa2013-12-13 10:19:55 +08006197 * netdev_lower_get_first_private_rcu - Get the first ->private from the
6198 * lower neighbour list, RCU
6199 * variant
6200 * @dev: device
6201 *
6202 * Gets the first netdev_adjacent->private from the dev's lower neighbour
6203 * list. The caller must hold RCU read lock.
6204 */
6205void *netdev_lower_get_first_private_rcu(struct net_device *dev)
6206{
6207 struct netdev_adjacent *lower;
6208
6209 lower = list_first_or_null_rcu(&dev->adj_list.lower,
6210 struct netdev_adjacent, list);
6211 if (lower)
6212 return lower->private;
6213 return NULL;
6214}
6215EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
6216
6217/**
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006218 * netdev_master_upper_dev_get_rcu - Get master upper device
6219 * @dev: device
6220 *
6221 * Find a master upper device and return a pointer to it, or NULL if
 6222 * there is none. The caller must hold the RCU read lock.
6223 */
6224struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
6225{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02006226 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006227
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006228 upper = list_first_or_null_rcu(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02006229 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006230 if (upper && likely(upper->master))
6231 return upper->dev;
6232 return NULL;
6233}
6234EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
6235
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05306236static int netdev_adjacent_sysfs_add(struct net_device *dev,
Veaceslav Falico3ee32702014-01-14 21:58:50 +01006237 struct net_device *adj_dev,
6238 struct list_head *dev_list)
6239{
6240 char linkname[IFNAMSIZ+7];
tchardingf4563a72017-02-09 17:56:07 +11006241
Veaceslav Falico3ee32702014-01-14 21:58:50 +01006242 sprintf(linkname, dev_list == &dev->adj_list.upper ?
6243 "upper_%s" : "lower_%s", adj_dev->name);
6244 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
6245 linkname);
6246}
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05306247static void netdev_adjacent_sysfs_del(struct net_device *dev,
Veaceslav Falico3ee32702014-01-14 21:58:50 +01006248 char *name,
6249 struct list_head *dev_list)
6250{
6251 char linkname[IFNAMSIZ+7];
tchardingf4563a72017-02-09 17:56:07 +11006252
Veaceslav Falico3ee32702014-01-14 21:58:50 +01006253 sprintf(linkname, dev_list == &dev->adj_list.upper ?
6254 "upper_%s" : "lower_%s", name);
6255 sysfs_remove_link(&(dev->dev.kobj), linkname);
6256}
6257
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04006258static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
6259 struct net_device *adj_dev,
6260 struct list_head *dev_list)
6261{
6262 return (dev_list == &dev->adj_list.upper ||
6263 dev_list == &dev->adj_list.lower) &&
6264 net_eq(dev_net(dev), dev_net(adj_dev));
6265}
Veaceslav Falico3ee32702014-01-14 21:58:50 +01006266
Veaceslav Falico5d261912013-08-28 23:25:05 +02006267static int __netdev_adjacent_dev_insert(struct net_device *dev,
6268 struct net_device *adj_dev,
Veaceslav Falico7863c052013-09-25 09:20:06 +02006269 struct list_head *dev_list,
Veaceslav Falico402dae92013-09-25 09:20:09 +02006270 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02006271{
6272 struct netdev_adjacent *adj;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02006273 int ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02006274
Michal Kubeček6ea29da2015-09-24 10:59:05 +02006275 adj = __netdev_find_adj(adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02006276
6277 if (adj) {
David Ahern790510d2016-10-17 19:15:43 -07006278 adj->ref_nr += 1;
David Ahern67b62f92016-10-17 19:15:53 -07006279 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
6280 dev->name, adj_dev->name, adj->ref_nr);
6281
Veaceslav Falico5d261912013-08-28 23:25:05 +02006282 return 0;
6283 }
6284
6285 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
6286 if (!adj)
6287 return -ENOMEM;
6288
6289 adj->dev = adj_dev;
6290 adj->master = master;
David Ahern790510d2016-10-17 19:15:43 -07006291 adj->ref_nr = 1;
Veaceslav Falico402dae92013-09-25 09:20:09 +02006292 adj->private = private;
Veaceslav Falico5d261912013-08-28 23:25:05 +02006293 dev_hold(adj_dev);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006294
David Ahern67b62f92016-10-17 19:15:53 -07006295 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
6296 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02006297
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04006298 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
Veaceslav Falico3ee32702014-01-14 21:58:50 +01006299 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
Veaceslav Falico5831d662013-09-25 09:20:32 +02006300 if (ret)
6301 goto free_adj;
6302 }
6303
Veaceslav Falico7863c052013-09-25 09:20:06 +02006304 /* Ensure that master link is always the first item in list. */
Veaceslav Falico842d67a2013-09-25 09:20:31 +02006305 if (master) {
6306 ret = sysfs_create_link(&(dev->dev.kobj),
6307 &(adj_dev->dev.kobj), "master");
6308 if (ret)
Veaceslav Falico5831d662013-09-25 09:20:32 +02006309 goto remove_symlinks;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02006310
Veaceslav Falico7863c052013-09-25 09:20:06 +02006311 list_add_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02006312 } else {
Veaceslav Falico7863c052013-09-25 09:20:06 +02006313 list_add_tail_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02006314 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02006315
6316 return 0;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02006317
Veaceslav Falico5831d662013-09-25 09:20:32 +02006318remove_symlinks:
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04006319 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
Veaceslav Falico3ee32702014-01-14 21:58:50 +01006320 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02006321free_adj:
6322 kfree(adj);
Nikolay Aleksandrov974daef2013-10-23 15:28:56 +02006323 dev_put(adj_dev);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02006324
6325 return ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02006326}
6327
stephen hemminger1d143d92013-12-29 14:01:29 -08006328static void __netdev_adjacent_dev_remove(struct net_device *dev,
6329 struct net_device *adj_dev,
Andrew Collins93409032016-10-03 13:43:02 -06006330 u16 ref_nr,
stephen hemminger1d143d92013-12-29 14:01:29 -08006331 struct list_head *dev_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02006332{
6333 struct netdev_adjacent *adj;
6334
David Ahern67b62f92016-10-17 19:15:53 -07006335 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
6336 dev->name, adj_dev->name, ref_nr);
6337
Michal Kubeček6ea29da2015-09-24 10:59:05 +02006338 adj = __netdev_find_adj(adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02006339
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006340 if (!adj) {
David Ahern67b62f92016-10-17 19:15:53 -07006341 pr_err("Adjacency does not exist for device %s from %s\n",
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006342 dev->name, adj_dev->name);
David Ahern67b62f92016-10-17 19:15:53 -07006343 WARN_ON(1);
6344 return;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006345 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02006346
Andrew Collins93409032016-10-03 13:43:02 -06006347 if (adj->ref_nr > ref_nr) {
David Ahern67b62f92016-10-17 19:15:53 -07006348 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
6349 dev->name, adj_dev->name, ref_nr,
6350 adj->ref_nr - ref_nr);
Andrew Collins93409032016-10-03 13:43:02 -06006351 adj->ref_nr -= ref_nr;
Veaceslav Falico5d261912013-08-28 23:25:05 +02006352 return;
6353 }
6354
Veaceslav Falico842d67a2013-09-25 09:20:31 +02006355 if (adj->master)
6356 sysfs_remove_link(&(dev->dev.kobj), "master");
6357
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04006358 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
Veaceslav Falico3ee32702014-01-14 21:58:50 +01006359 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
Veaceslav Falico5831d662013-09-25 09:20:32 +02006360
Veaceslav Falico5d261912013-08-28 23:25:05 +02006361 list_del_rcu(&adj->list);
David Ahern67b62f92016-10-17 19:15:53 -07006362 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006363 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02006364 dev_put(adj_dev);
6365 kfree_rcu(adj, rcu);
6366}
6367
stephen hemminger1d143d92013-12-29 14:01:29 -08006368static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
6369 struct net_device *upper_dev,
6370 struct list_head *up_list,
6371 struct list_head *down_list,
6372 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02006373{
6374 int ret;
6375
David Ahern790510d2016-10-17 19:15:43 -07006376 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
Andrew Collins93409032016-10-03 13:43:02 -06006377 private, master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02006378 if (ret)
6379 return ret;
6380
David Ahern790510d2016-10-17 19:15:43 -07006381 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
Andrew Collins93409032016-10-03 13:43:02 -06006382 private, false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02006383 if (ret) {
David Ahern790510d2016-10-17 19:15:43 -07006384 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02006385 return ret;
6386 }
6387
6388 return 0;
6389}
6390
stephen hemminger1d143d92013-12-29 14:01:29 -08006391static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
6392 struct net_device *upper_dev,
Andrew Collins93409032016-10-03 13:43:02 -06006393 u16 ref_nr,
stephen hemminger1d143d92013-12-29 14:01:29 -08006394 struct list_head *up_list,
6395 struct list_head *down_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02006396{
Andrew Collins93409032016-10-03 13:43:02 -06006397 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
6398 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02006399}
6400
stephen hemminger1d143d92013-12-29 14:01:29 -08006401static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
6402 struct net_device *upper_dev,
6403 void *private, bool master)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006404{
David Ahernf1170fd2016-10-17 19:15:51 -07006405 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
6406 &dev->adj_list.upper,
6407 &upper_dev->adj_list.lower,
6408 private, master);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006409}
6410
stephen hemminger1d143d92013-12-29 14:01:29 -08006411static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
6412 struct net_device *upper_dev)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006413{
Andrew Collins93409032016-10-03 13:43:02 -06006414 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006415 &dev->adj_list.upper,
6416 &upper_dev->adj_list.lower);
6417}
Veaceslav Falico5d261912013-08-28 23:25:05 +02006418
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006419static int __netdev_upper_dev_link(struct net_device *dev,
Veaceslav Falico402dae92013-09-25 09:20:09 +02006420 struct net_device *upper_dev, bool master,
David Ahern42ab19e2017-10-04 17:48:47 -07006421 void *upper_priv, void *upper_info,
6422 struct netlink_ext_ack *extack)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006423{
David Ahern51d0c0472017-10-04 17:48:45 -07006424 struct netdev_notifier_changeupper_info changeupper_info = {
6425 .info = {
6426 .dev = dev,
David Ahern42ab19e2017-10-04 17:48:47 -07006427 .extack = extack,
David Ahern51d0c0472017-10-04 17:48:45 -07006428 },
6429 .upper_dev = upper_dev,
6430 .master = master,
6431 .linking = true,
6432 .upper_info = upper_info,
6433 };
Mike Manning50d629e2018-02-26 23:49:30 +00006434 struct net_device *master_dev;
Veaceslav Falico5d261912013-08-28 23:25:05 +02006435 int ret = 0;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006436
6437 ASSERT_RTNL();
6438
6439 if (dev == upper_dev)
6440 return -EBUSY;
6441
6442	/* To prevent loops, check that dev is not already an upper device of upper_dev. */
David Ahernf1170fd2016-10-17 19:15:51 -07006443 if (netdev_has_upper_dev(upper_dev, dev))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006444 return -EBUSY;
6445
Mike Manning50d629e2018-02-26 23:49:30 +00006446 if (!master) {
6447 if (netdev_has_upper_dev(dev, upper_dev))
6448 return -EEXIST;
6449 } else {
6450 master_dev = netdev_master_upper_dev_get(dev);
6451 if (master_dev)
6452 return master_dev == upper_dev ? -EEXIST : -EBUSY;
6453 }
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006454
David Ahern51d0c0472017-10-04 17:48:45 -07006455 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
Jiri Pirko573c7ba2015-10-16 14:01:22 +02006456 &changeupper_info.info);
6457 ret = notifier_to_errno(ret);
6458 if (ret)
6459 return ret;
6460
Jiri Pirko6dffb042015-12-03 12:12:10 +01006461 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
Veaceslav Falico402dae92013-09-25 09:20:09 +02006462 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02006463 if (ret)
6464 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006465
David Ahern51d0c0472017-10-04 17:48:45 -07006466 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
Ido Schimmelb03804e2015-12-03 12:12:03 +01006467 &changeupper_info.info);
6468 ret = notifier_to_errno(ret);
6469 if (ret)
David Ahernf1170fd2016-10-17 19:15:51 -07006470 goto rollback;
Ido Schimmelb03804e2015-12-03 12:12:03 +01006471
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006472 return 0;
Veaceslav Falico5d261912013-08-28 23:25:05 +02006473
David Ahernf1170fd2016-10-17 19:15:51 -07006474rollback:
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006475 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02006476
6477 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006478}
6479
6480/**
6481 * netdev_upper_dev_link - Add a link to the upper device
6482 * @dev: device
6483 * @upper_dev: new upper device
Florian Fainelli7a006d52018-01-22 19:14:28 -08006484 * @extack: netlink extended ack
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006485 *
6486 * Adds a link to a device which is upper to this one. The caller must hold
6487 * the RTNL lock. On failure a negative errno code is returned.
6488 * On success the reference counts are adjusted and the function
6489 * returns zero.
6490 */
6491int netdev_upper_dev_link(struct net_device *dev,
David Ahern42ab19e2017-10-04 17:48:47 -07006492 struct net_device *upper_dev,
6493 struct netlink_ext_ack *extack)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006494{
David Ahern42ab19e2017-10-04 17:48:47 -07006495 return __netdev_upper_dev_link(dev, upper_dev, false,
6496 NULL, NULL, extack);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006497}
6498EXPORT_SYMBOL(netdev_upper_dev_link);
6499
6500/**
6501 * netdev_master_upper_dev_link - Add a master link to the upper device
6502 * @dev: device
6503 * @upper_dev: new upper device
Jiri Pirko6dffb042015-12-03 12:12:10 +01006504 * @upper_priv: upper device private
Jiri Pirko29bf24a2015-12-03 12:12:11 +01006505 * @upper_info: upper info to be passed down via notifier
Florian Fainelli7a006d52018-01-22 19:14:28 -08006506 * @extack: netlink extended ack
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006507 *
6508 * Adds a link to a device which is upper to this one. In this case, only
6509 * one master upper device can be linked, although other non-master devices
6510 * might be linked as well. The caller must hold the RTNL lock.
6511 * On failure a negative errno code is returned. On success the reference
6512 * counts are adjusted and the function returns zero.
6513 */
6514int netdev_master_upper_dev_link(struct net_device *dev,
Jiri Pirko6dffb042015-12-03 12:12:10 +01006515 struct net_device *upper_dev,
David Ahern42ab19e2017-10-04 17:48:47 -07006516 void *upper_priv, void *upper_info,
6517 struct netlink_ext_ack *extack)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006518{
Jiri Pirko29bf24a2015-12-03 12:12:11 +01006519 return __netdev_upper_dev_link(dev, upper_dev, true,
David Ahern42ab19e2017-10-04 17:48:47 -07006520 upper_priv, upper_info, extack);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006521}
6522EXPORT_SYMBOL(netdev_master_upper_dev_link);
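/*
 * Illustrative sketch (editor-added, not part of dev.c): master links
 * are what bonding/team-style drivers create in their enslave path,
 * passing per-slave private state via @upper_priv and a notifier
 * payload via @upper_info. All foo_* names and types are assumptions
 * made up for the example.
 */
#if 0
struct foo_slave_priv { int id; };
struct foo_upper_info { int mode; };

static int foo_enslave(struct net_device *master, struct net_device *slave,
		       struct foo_slave_priv *priv,
		       struct netlink_ext_ack *extack)
{
	struct foo_upper_info info = { .mode = 1 };

	ASSERT_RTNL();
	/* slave gets master as its one and only master upper device */
	return netdev_master_upper_dev_link(slave, master, priv, &info,
					    extack);
}
#endif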
6523
6524/**
6525 * netdev_upper_dev_unlink - Removes a link to upper device
6526 * @dev: device
6527 * @upper_dev: upper device to unlink
6528 *
6529 * Removes a link to a device which is upper to this one. The caller must hold
6530 * the RTNL lock.
6531 */
6532void netdev_upper_dev_unlink(struct net_device *dev,
6533 struct net_device *upper_dev)
6534{
David Ahern51d0c0472017-10-04 17:48:45 -07006535 struct netdev_notifier_changeupper_info changeupper_info = {
6536 .info = {
6537 .dev = dev,
6538 },
6539 .upper_dev = upper_dev,
6540 .linking = false,
6541 };
tchardingf4563a72017-02-09 17:56:07 +11006542
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006543 ASSERT_RTNL();
6544
Jiri Pirko0e4ead92015-08-27 09:31:18 +02006545 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
Jiri Pirko0e4ead92015-08-27 09:31:18 +02006546
David Ahern51d0c0472017-10-04 17:48:45 -07006547 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
Jiri Pirko573c7ba2015-10-16 14:01:22 +02006548 &changeupper_info.info);
6549
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006550 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02006551
David Ahern51d0c0472017-10-04 17:48:45 -07006552 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
Jiri Pirko0e4ead92015-08-27 09:31:18 +02006553 &changeupper_info.info);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006554}
6555EXPORT_SYMBOL(netdev_upper_dev_unlink);
6556
Moni Shoua61bd3852015-02-03 16:48:29 +02006557/**
6558 * netdev_bonding_info_change - Dispatch event about slave change
6559 * @dev: device
Masanari Iida4a26e4532015-02-14 22:26:34 +09006560 * @bonding_info: info to dispatch
Moni Shoua61bd3852015-02-03 16:48:29 +02006561 *
6562 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
6563 * The caller must hold the RTNL lock.
6564 */
6565void netdev_bonding_info_change(struct net_device *dev,
6566 struct netdev_bonding_info *bonding_info)
6567{
David Ahern51d0c0472017-10-04 17:48:45 -07006568 struct netdev_notifier_bonding_info info = {
6569 .info.dev = dev,
6570 };
Moni Shoua61bd3852015-02-03 16:48:29 +02006571
6572 memcpy(&info.bonding_info, bonding_info,
6573 sizeof(struct netdev_bonding_info));
David Ahern51d0c0472017-10-04 17:48:45 -07006574 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
Moni Shoua61bd3852015-02-03 16:48:29 +02006575 &info.info);
6576}
6577EXPORT_SYMBOL(netdev_bonding_info_change);
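/*
 * Illustrative sketch (editor-added, not part of dev.c): a bonding
 * driver fills struct netdev_bonding_info before dispatching; the
 * zeroed struct and the foo_ name here are placeholders.
 */
#if 0
static void foo_notify_slave(struct net_device *slave_dev)
{
	struct netdev_bonding_info info;

	memset(&info, 0, sizeof(info));
	/* a real driver fills info.master and info.slave here */
	netdev_bonding_info_change(slave_dev, &info);
}
#endif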
6578
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08006579static void netdev_adjacent_add_links(struct net_device *dev)
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006580{
6581 struct netdev_adjacent *iter;
6582
6583 struct net *net = dev_net(dev);
6584
6585 list_for_each_entry(iter, &dev->adj_list.upper, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08006586 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006587 continue;
6588 netdev_adjacent_sysfs_add(iter->dev, dev,
6589 &iter->dev->adj_list.lower);
6590 netdev_adjacent_sysfs_add(dev, iter->dev,
6591 &dev->adj_list.upper);
6592 }
6593
6594 list_for_each_entry(iter, &dev->adj_list.lower, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08006595 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006596 continue;
6597 netdev_adjacent_sysfs_add(iter->dev, dev,
6598 &iter->dev->adj_list.upper);
6599 netdev_adjacent_sysfs_add(dev, iter->dev,
6600 &dev->adj_list.lower);
6601 }
6602}
6603
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08006604static void netdev_adjacent_del_links(struct net_device *dev)
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006605{
6606 struct netdev_adjacent *iter;
6607
6608 struct net *net = dev_net(dev);
6609
6610 list_for_each_entry(iter, &dev->adj_list.upper, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08006611 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006612 continue;
6613 netdev_adjacent_sysfs_del(iter->dev, dev->name,
6614 &iter->dev->adj_list.lower);
6615 netdev_adjacent_sysfs_del(dev, iter->dev->name,
6616 &dev->adj_list.upper);
6617 }
6618
6619 list_for_each_entry(iter, &dev->adj_list.lower, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08006620 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006621 continue;
6622 netdev_adjacent_sysfs_del(iter->dev, dev->name,
6623 &iter->dev->adj_list.upper);
6624 netdev_adjacent_sysfs_del(dev, iter->dev->name,
6625 &dev->adj_list.lower);
6626 }
6627}
6628
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01006629void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
Veaceslav Falico402dae92013-09-25 09:20:09 +02006630{
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01006631 struct netdev_adjacent *iter;
Veaceslav Falico402dae92013-09-25 09:20:09 +02006632
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006633 struct net *net = dev_net(dev);
6634
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01006635 list_for_each_entry(iter, &dev->adj_list.upper, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08006636 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006637 continue;
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01006638 netdev_adjacent_sysfs_del(iter->dev, oldname,
6639 &iter->dev->adj_list.lower);
6640 netdev_adjacent_sysfs_add(iter->dev, dev,
6641 &iter->dev->adj_list.lower);
6642 }
Veaceslav Falico402dae92013-09-25 09:20:09 +02006643
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01006644 list_for_each_entry(iter, &dev->adj_list.lower, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08006645 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006646 continue;
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01006647 netdev_adjacent_sysfs_del(iter->dev, oldname,
6648 &iter->dev->adj_list.upper);
6649 netdev_adjacent_sysfs_add(iter->dev, dev,
6650 &iter->dev->adj_list.upper);
6651 }
Veaceslav Falico402dae92013-09-25 09:20:09 +02006652}
Veaceslav Falico402dae92013-09-25 09:20:09 +02006653
6654void *netdev_lower_dev_get_private(struct net_device *dev,
6655 struct net_device *lower_dev)
6656{
6657 struct netdev_adjacent *lower;
6658
6659 if (!lower_dev)
6660 return NULL;
Michal Kubeček6ea29da2015-09-24 10:59:05 +02006661 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
Veaceslav Falico402dae92013-09-25 09:20:09 +02006662 if (!lower)
6663 return NULL;
6664
6665 return lower->private;
6666}
6667EXPORT_SYMBOL(netdev_lower_dev_get_private);
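/*
 * Illustrative sketch (editor-added, not part of dev.c): the
 * @upper_priv pointer handed to netdev_master_upper_dev_link() can
 * later be recovered from the master's side. The foo_* names are
 * hypothetical and refer to the enslave sketch above.
 */
#if 0
static struct foo_slave_priv *foo_slave_state(struct net_device *master,
					      struct net_device *slave)
{
	/* NULL if slave is not a lower device of master */
	return netdev_lower_dev_get_private(master, slave);
}
#endif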
6668
Sabrina Dubroca952fcfd2016-08-12 16:10:33 +02006670int dev_get_nest_level(struct net_device *dev)
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006671{
6672 struct net_device *lower = NULL;
6673 struct list_head *iter;
6674 int max_nest = -1;
6675 int nest;
6676
6677 ASSERT_RTNL();
6678
6679 netdev_for_each_lower_dev(dev, lower, iter) {
Sabrina Dubroca952fcfd2016-08-12 16:10:33 +02006680 nest = dev_get_nest_level(lower);
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006681 if (max_nest < nest)
6682 max_nest = nest;
6683 }
6684
Sabrina Dubroca952fcfd2016-08-12 16:10:33 +02006685 return max_nest + 1;
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006686}
6687EXPORT_SYMBOL(dev_get_nest_level);
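/*
 * Illustrative sketch (editor-added, not part of dev.c) of what the
 * recursion above yields: a device with no lower devices reports 0,
 * and each stacking layer adds one. The foo_ name is hypothetical.
 */
#if 0
static void foo_report_depth(struct net_device *dev)
{
	ASSERT_RTNL();
	/* eth0 (no lowers): 0; vlan on eth0: 1; macvlan on that vlan: 2 */
	netdev_info(dev, "nest level %d\n", dev_get_nest_level(dev));
}
#endif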
6688
Jiri Pirko04d48262015-12-03 12:12:15 +01006689/**
6690 * netdev_lower_state_changed - Dispatch event about lower device state change
6691 * @lower_dev: device
6692 * @lower_state_info: state to dispatch
6693 *
6694 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
6695 * The caller must hold the RTNL lock.
6696 */
6697void netdev_lower_state_changed(struct net_device *lower_dev,
6698 void *lower_state_info)
6699{
David Ahern51d0c0472017-10-04 17:48:45 -07006700 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
6701 .info.dev = lower_dev,
6702 };
Jiri Pirko04d48262015-12-03 12:12:15 +01006703
6704 ASSERT_RTNL();
6705 changelowerstate_info.lower_state_info = lower_state_info;
David Ahern51d0c0472017-10-04 17:48:45 -07006706 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
Jiri Pirko04d48262015-12-03 12:12:15 +01006707 &changelowerstate_info.info);
6708}
6709EXPORT_SYMBOL(netdev_lower_state_changed);
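/*
 * Illustrative sketch (editor-added, not part of dev.c): a lower
 * device driver pushes opaque state to its uppers, which receive it
 * in their NETDEV_CHANGELOWERSTATE notifier. The foo_* state layout
 * is an assumption; team's port state is the in-tree analogue.
 */
#if 0
struct foo_lower_state { bool link_up; };

static void foo_link_change(struct net_device *lower_dev, bool up)
{
	struct foo_lower_state state = { .link_up = up };

	ASSERT_RTNL();
	netdev_lower_state_changed(lower_dev, &state);
}
#endif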
6710
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006711static void dev_change_rx_flags(struct net_device *dev, int flags)
6712{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006713 const struct net_device_ops *ops = dev->netdev_ops;
6714
Vlad Yasevichd2615bf2013-11-19 20:47:15 -05006715 if (ops->ndo_change_rx_flags)
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006716 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006717}
6718
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006719static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
Patrick McHardy4417da62007-06-27 01:28:10 -07006720{
Eric Dumazetb536db92011-11-30 21:42:26 +00006721 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06006722 kuid_t uid;
6723 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07006724
Patrick McHardy24023452007-07-14 18:51:31 -07006725 ASSERT_RTNL();
6726
Wang Chendad9b332008-06-18 01:48:28 -07006727 dev->flags |= IFF_PROMISC;
6728 dev->promiscuity += inc;
6729 if (dev->promiscuity == 0) {
6730 /*
6731 * Avoid overflow.
6732 * If inc causes overflow, untouch promisc and return error.
6733 */
6734 if (inc < 0)
6735 dev->flags &= ~IFF_PROMISC;
6736 else {
6737 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006738 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
6739 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07006740 return -EOVERFLOW;
6741 }
6742 }
Patrick McHardy4417da62007-06-27 01:28:10 -07006743 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006744 pr_info("device %s %s promiscuous mode\n",
6745 dev->name,
6746 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11006747 if (audit_enabled) {
6748 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05006749 audit_log(current->audit_context, GFP_ATOMIC,
6750 AUDIT_ANOM_PROMISCUOUS,
6751 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6752 dev->name, (dev->flags & IFF_PROMISC),
6753 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07006754 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06006755 from_kuid(&init_user_ns, uid),
6756 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05006757 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11006758 }
Patrick McHardy24023452007-07-14 18:51:31 -07006759
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006760 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07006761 }
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006762 if (notify)
6763 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
Wang Chendad9b332008-06-18 01:48:28 -07006764 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07006765}
6766
Linus Torvalds1da177e2005-04-16 15:20:36 -07006767/**
6768 * dev_set_promiscuity - update promiscuity count on a device
6769 * @dev: device
6770 * @inc: modifier
6771 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07006772 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006773 * remains above zero the interface remains promiscuous. Once it hits zero
6774 * the device reverts to normal filtering operation. A negative @inc
6775 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07006776 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006777 */
Wang Chendad9b332008-06-18 01:48:28 -07006778int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006779{
Eric Dumazetb536db92011-11-30 21:42:26 +00006780 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07006781 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006782
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006783 err = __dev_set_promiscuity(dev, inc, true);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07006784 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07006785 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07006786 if (dev->flags != old_flags)
6787 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07006788 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006789}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006790EXPORT_SYMBOL(dev_set_promiscuity);
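/*
 * Illustrative sketch (editor-added, not part of dev.c) of the counted
 * semantics: each +1 must eventually be balanced by a -1, and the
 * device only leaves promiscuous mode once the count drops to zero.
 * The foo_* names are hypothetical.
 */
#if 0
static int foo_start_capture(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* count up: enter promisc */
	rtnl_unlock();
	return err;
}

static void foo_stop_capture(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);	/* count down: may leave promisc */
	rtnl_unlock();
}
#endif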
Linus Torvalds1da177e2005-04-16 15:20:36 -07006791
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006792static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006793{
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006794 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006795
Patrick McHardy24023452007-07-14 18:51:31 -07006796 ASSERT_RTNL();
6797
Linus Torvalds1da177e2005-04-16 15:20:36 -07006798 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07006799 dev->allmulti += inc;
6800 if (dev->allmulti == 0) {
6801 /*
6802 * Avoid overflow.
6803 * If inc causes overflow, untouch allmulti and return error.
6804 */
6805 if (inc < 0)
6806 dev->flags &= ~IFF_ALLMULTI;
6807 else {
6808 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006809 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
6810 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07006811 return -EOVERFLOW;
6812 }
6813 }
Patrick McHardy24023452007-07-14 18:51:31 -07006814 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006815 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07006816 dev_set_rx_mode(dev);
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006817 if (notify)
6818 __dev_notify_flags(dev, old_flags,
6819 dev->gflags ^ old_gflags);
Patrick McHardy24023452007-07-14 18:51:31 -07006820 }
Wang Chendad9b332008-06-18 01:48:28 -07006821 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07006822}
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006823
6824/**
6825 * dev_set_allmulti - update allmulti count on a device
6826 * @dev: device
6827 * @inc: modifier
6828 *
6829 * Add or remove reception of all multicast frames on a device. While the
6830 * count in the device remains above zero the interface remains listening
6831 * to all multicast frames. Once it hits zero the device reverts to normal
6832 * filtering operation. A negative @inc value is used to drop the counter
6833 * when releasing a resource needing all multicasts.
6834 * Return 0 if successful or a negative errno code on error.
6835 */
6836
6837int dev_set_allmulti(struct net_device *dev, int inc)
6838{
6839 return __dev_set_allmulti(dev, inc, true);
6840}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006841EXPORT_SYMBOL(dev_set_allmulti);
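/*
 * Illustrative sketch (editor-added, not part of dev.c): the same
 * counted pattern as promiscuity, used e.g. by tunnel drivers that
 * need to see every multicast frame. The foo_* names are hypothetical.
 */
#if 0
static int foo_mcast_open(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev_set_allmulti(dev, 1);	/* start seeing all mcast */
}

static void foo_mcast_close(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_set_allmulti(dev, -1);		/* drop our reference */
}
#endif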
Patrick McHardy4417da62007-06-27 01:28:10 -07006842
6843/*
6844 * Upload unicast and multicast address lists to the device and
6845 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08006846 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07006847 * are present.
6848 */
6849void __dev_set_rx_mode(struct net_device *dev)
6850{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006851 const struct net_device_ops *ops = dev->netdev_ops;
6852
Patrick McHardy4417da62007-06-27 01:28:10 -07006853 /* dev_open will call this function so the list will stay sane. */
6854 if (!(dev->flags&IFF_UP))
6855 return;
6856
6857 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09006858 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07006859
Jiri Pirko01789342011-08-16 06:29:00 +00006860 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07006861 /* Unicast addresses changes may only happen under the rtnl,
6862 * therefore calling __dev_set_promiscuity here is safe.
6863 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08006864 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006865 __dev_set_promiscuity(dev, 1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07006866 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08006867 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006868 __dev_set_promiscuity(dev, -1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07006869 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07006870 }
Patrick McHardy4417da62007-06-27 01:28:10 -07006871 }
Jiri Pirko01789342011-08-16 06:29:00 +00006872
6873 if (ops->ndo_set_rx_mode)
6874 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07006875}
6876
6877void dev_set_rx_mode(struct net_device *dev)
6878{
David S. Millerb9e40852008-07-15 00:15:08 -07006879 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07006880 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07006881 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006882}
6883
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006884/**
6885 * dev_get_flags - get flags reported to userspace
6886 * @dev: device
6887 *
6888 * Get the combination of flag bits exported through APIs to userspace.
6889 */
Eric Dumazet95c96172012-04-15 05:58:06 +00006890unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006891{
Eric Dumazet95c96172012-04-15 05:58:06 +00006892 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006893
6894 flags = (dev->flags & ~(IFF_PROMISC |
6895 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08006896 IFF_RUNNING |
6897 IFF_LOWER_UP |
6898 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07006899 (dev->gflags & (IFF_PROMISC |
6900 IFF_ALLMULTI));
6901
Stefan Rompfb00055a2006-03-20 17:09:11 -08006902 if (netif_running(dev)) {
6903 if (netif_oper_up(dev))
6904 flags |= IFF_RUNNING;
6905 if (netif_carrier_ok(dev))
6906 flags |= IFF_LOWER_UP;
6907 if (netif_dormant(dev))
6908 flags |= IFF_DORMANT;
6909 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006910
6911 return flags;
6912}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006913EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006914
Patrick McHardybd380812010-02-26 06:34:53 +00006915int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006916{
Eric Dumazetb536db92011-11-30 21:42:26 +00006917 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00006918 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006919
Patrick McHardy24023452007-07-14 18:51:31 -07006920 ASSERT_RTNL();
6921
Linus Torvalds1da177e2005-04-16 15:20:36 -07006922 /*
6923 * Set the flags on our device.
6924 */
6925
6926 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6927 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6928 IFF_AUTOMEDIA)) |
6929 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6930 IFF_ALLMULTI));
6931
6932 /*
6933 * Load in the correct multicast list now the flags have changed.
6934 */
6935
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006936 if ((old_flags ^ flags) & IFF_MULTICAST)
6937 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07006938
Patrick McHardy4417da62007-06-27 01:28:10 -07006939 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006940
6941 /*
6942	 * Have we downed the interface? We handle IFF_UP ourselves
6943 * according to user attempts to set it, rather than blindly
6944 * setting it.
6945 */
6946
6947 ret = 0;
stephen hemminger7051b882017-07-18 15:59:27 -07006948 if ((old_flags ^ flags) & IFF_UP) {
6949 if (old_flags & IFF_UP)
6950 __dev_close(dev);
6951 else
6952 ret = __dev_open(dev);
6953 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006954
Linus Torvalds1da177e2005-04-16 15:20:36 -07006955 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006956 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006957 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006958
Linus Torvalds1da177e2005-04-16 15:20:36 -07006959 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006960
6961 if (__dev_set_promiscuity(dev, inc, false) >= 0)
6962 if (dev->flags != old_flags)
6963 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006964 }
6965
6966 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
tchardingeb13da12017-02-09 17:56:06 +11006967	 * is important. Some (broken) drivers set IFF_PROMISC when
6968	 * IFF_ALLMULTI is requested, without asking us and without reporting it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006969 */
6970 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006971 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6972
Linus Torvalds1da177e2005-04-16 15:20:36 -07006973 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006974 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006975 }
6976
Patrick McHardybd380812010-02-26 06:34:53 +00006977 return ret;
6978}
6979
Nicolas Dichtela528c212013-09-25 12:02:44 +02006980void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6981 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00006982{
6983 unsigned int changes = dev->flags ^ old_flags;
6984
Nicolas Dichtela528c212013-09-25 12:02:44 +02006985 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006986 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02006987
Patrick McHardybd380812010-02-26 06:34:53 +00006988 if (changes & IFF_UP) {
6989 if (dev->flags & IFF_UP)
6990 call_netdevice_notifiers(NETDEV_UP, dev);
6991 else
6992 call_netdevice_notifiers(NETDEV_DOWN, dev);
6993 }
6994
6995 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00006996 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
David Ahern51d0c0472017-10-04 17:48:45 -07006997 struct netdev_notifier_change_info change_info = {
6998 .info = {
6999 .dev = dev,
7000 },
7001 .flags_changed = changes,
7002 };
Jiri Pirkobe9efd32013-05-28 01:30:22 +00007003
David Ahern51d0c0472017-10-04 17:48:45 -07007004 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
Jiri Pirkobe9efd32013-05-28 01:30:22 +00007005 }
Patrick McHardybd380812010-02-26 06:34:53 +00007006}
7007
7008/**
7009 * dev_change_flags - change device settings
7010 * @dev: device
7011 * @flags: device state flags
7012 *
7013 * Change settings on a device based on state flags. The flags are
7014 * in the userspace exported format.
7015 */
Eric Dumazetb536db92011-11-30 21:42:26 +00007016int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00007017{
Eric Dumazetb536db92011-11-30 21:42:26 +00007018 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02007019 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00007020
7021 ret = __dev_change_flags(dev, flags);
7022 if (ret < 0)
7023 return ret;
7024
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02007025 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02007026 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007027 return ret;
7028}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007029EXPORT_SYMBOL(dev_change_flags);
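/*
 * Illustrative sketch (editor-added, not part of dev.c): flags are
 * read and written in the userspace-exported format, so bringing a
 * device up is an OR of IFF_UP into dev_get_flags(). The foo_ name
 * is hypothetical.
 */
#if 0
static int foo_force_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif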
Linus Torvalds1da177e2005-04-16 15:20:36 -07007030
WANG Congf51048c2017-07-06 15:01:57 -07007031int __dev_set_mtu(struct net_device *dev, int new_mtu)
Veaceslav Falico2315dc92014-01-10 16:56:25 +01007032{
7033 const struct net_device_ops *ops = dev->netdev_ops;
7034
7035 if (ops->ndo_change_mtu)
7036 return ops->ndo_change_mtu(dev, new_mtu);
7037
7038 dev->mtu = new_mtu;
7039 return 0;
7040}
WANG Congf51048c2017-07-06 15:01:57 -07007041EXPORT_SYMBOL(__dev_set_mtu);
Veaceslav Falico2315dc92014-01-10 16:56:25 +01007042
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007043/**
7044 * dev_set_mtu - Change maximum transfer unit
7045 * @dev: device
7046 * @new_mtu: new transfer unit
7047 *
7048 * Change the maximum transfer size of the network device.
7049 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007050int dev_set_mtu(struct net_device *dev, int new_mtu)
7051{
Veaceslav Falico2315dc92014-01-10 16:56:25 +01007052 int err, orig_mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007053
7054 if (new_mtu == dev->mtu)
7055 return 0;
7056
Jarod Wilson61e84622016-10-07 22:04:33 -04007057 /* MTU must be positive, and in range */
7058 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
7059 net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
7060 dev->name, new_mtu, dev->min_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007061 return -EINVAL;
Jarod Wilson61e84622016-10-07 22:04:33 -04007062 }
7063
7064 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
7065 net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
Jakub Kicinskia0e65de2016-10-17 18:02:22 +01007066 dev->name, new_mtu, dev->max_mtu);
Jarod Wilson61e84622016-10-07 22:04:33 -04007067 return -EINVAL;
7068 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007069
7070 if (!netif_device_present(dev))
7071 return -ENODEV;
7072
Veaceslav Falico1d486bf2014-01-16 00:02:18 +01007073 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
7074 err = notifier_to_errno(err);
7075 if (err)
7076 return err;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007077
Veaceslav Falico2315dc92014-01-10 16:56:25 +01007078 orig_mtu = dev->mtu;
7079 err = __dev_set_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007080
Veaceslav Falico2315dc92014-01-10 16:56:25 +01007081 if (!err) {
7082 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
7083 err = notifier_to_errno(err);
7084 if (err) {
7085 /* setting mtu back and notifying everyone again,
7086 * so that they have a chance to revert changes.
7087 */
7088 __dev_set_mtu(dev, orig_mtu);
7089 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
7090 }
7091 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007092 return err;
7093}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007094EXPORT_SYMBOL(dev_set_mtu);
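/*
 * Illustrative sketch (editor-added, not part of dev.c): the new MTU
 * is range-checked against dev->min_mtu/dev->max_mtu, and the
 * PRE/CHANGEMTU notifiers may veto it, in which case the old value is
 * restored. The foo_ name is hypothetical.
 */
#if 0
static int foo_shrink_mtu(struct net_device *dev, int mtu)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, mtu);	/* e.g. -EINVAL if out of range */
	rtnl_unlock();
	return err;
}
#endif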
Linus Torvalds1da177e2005-04-16 15:20:36 -07007095
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007096/**
Cong Wang6a643dd2018-01-25 18:26:22 -08007097 * dev_change_tx_queue_len - Change TX queue length of a netdevice
7098 * @dev: device
7099 * @new_len: new tx queue length
7100 */
7101int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
7102{
7103 unsigned int orig_len = dev->tx_queue_len;
7104 int res;
7105
7106 if (new_len != (unsigned int)new_len)
7107 return -ERANGE;
7108
7109 if (new_len != orig_len) {
7110 dev->tx_queue_len = new_len;
7111 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
7112 res = notifier_to_errno(res);
7113 if (res) {
7114 netdev_err(dev,
7115 "refused to change device tx_queue_len\n");
7116 dev->tx_queue_len = orig_len;
7117 return res;
7118 }
Cong Wang48bfd552018-01-25 18:26:23 -08007119 return dev_qdisc_change_tx_queue_len(dev);
Cong Wang6a643dd2018-01-25 18:26:22 -08007120 }
7121
7122 return 0;
7123}
7124
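/*
 * Illustrative sketch (editor-added, not part of dev.c): like the MTU
 * path above, the queue-length change is offered to notifiers and
 * rolled back if one refuses. The foo_ name is hypothetical.
 */
#if 0
static int foo_resize_txq(struct net_device *dev, unsigned long len)
{
	ASSERT_RTNL();
	return dev_change_tx_queue_len(dev, len);
}
#endif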
7125/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00007126 * dev_set_group - Change group this device belongs to
7127 * @dev: device
7128 * @new_group: group this device should belong to
7129 */
7130void dev_set_group(struct net_device *dev, int new_group)
7131{
7132 dev->group = new_group;
7133}
7134EXPORT_SYMBOL(dev_set_group);
7135
7136/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007137 * dev_set_mac_address - Change Media Access Control Address
7138 * @dev: device
7139 * @sa: new address
7140 *
7141 * Change the hardware (MAC) address of the device
7142 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007143int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
7144{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007145 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007146 int err;
7147
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007148 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007149 return -EOPNOTSUPP;
7150 if (sa->sa_family != dev->type)
7151 return -EINVAL;
7152 if (!netif_device_present(dev))
7153 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007154 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00007155 if (err)
7156 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00007157 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00007158 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04007159 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00007160 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007161}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007162EXPORT_SYMBOL(dev_set_mac_address);
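/*
 * Illustrative sketch (editor-added, not part of dev.c): callers build
 * a struct sockaddr whose family matches dev->type. eth_random_addr()
 * comes from <linux/etherdevice.h>; the foo_ name is hypothetical.
 */
#if 0
static int foo_set_random_mac(struct net_device *dev)
{
	struct sockaddr sa;

	ASSERT_RTNL();
	sa.sa_family = dev->type;	/* e.g. ARPHRD_ETHER */
	eth_random_addr((u8 *)sa.sa_data);
	return dev_set_mac_address(dev, &sa);
}
#endif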
Linus Torvalds1da177e2005-04-16 15:20:36 -07007163
Jiri Pirko4bf84c32012-12-27 23:49:37 +00007164/**
7165 * dev_change_carrier - Change device carrier
7166 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00007167 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00007168 *
7169 * Change device carrier
7170 */
7171int dev_change_carrier(struct net_device *dev, bool new_carrier)
7172{
7173 const struct net_device_ops *ops = dev->netdev_ops;
7174
7175 if (!ops->ndo_change_carrier)
7176 return -EOPNOTSUPP;
7177 if (!netif_device_present(dev))
7178 return -ENODEV;
7179 return ops->ndo_change_carrier(dev, new_carrier);
7180}
7181EXPORT_SYMBOL(dev_change_carrier);
7182
Linus Torvalds1da177e2005-04-16 15:20:36 -07007183/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02007184 * dev_get_phys_port_id - Get device physical port ID
7185 * @dev: device
7186 * @ppid: port ID
7187 *
7188 * Get device physical port ID
7189 */
7190int dev_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01007191 struct netdev_phys_item_id *ppid)
Jiri Pirko66b52b02013-07-29 18:16:49 +02007192{
7193 const struct net_device_ops *ops = dev->netdev_ops;
7194
7195 if (!ops->ndo_get_phys_port_id)
7196 return -EOPNOTSUPP;
7197 return ops->ndo_get_phys_port_id(dev, ppid);
7198}
7199EXPORT_SYMBOL(dev_get_phys_port_id);
7200
7201/**
David Aherndb24a902015-03-17 20:23:15 -06007202 * dev_get_phys_port_name - Get device physical port name
7203 * @dev: device
7204 * @name: port name
Luis de Bethencourted49e652016-03-21 16:31:14 +00007205 * @len: limit of bytes to copy to name
David Aherndb24a902015-03-17 20:23:15 -06007206 *
7207 * Get device physical port name
7208 */
7209int dev_get_phys_port_name(struct net_device *dev,
7210 char *name, size_t len)
7211{
7212 const struct net_device_ops *ops = dev->netdev_ops;
7213
7214 if (!ops->ndo_get_phys_port_name)
7215 return -EOPNOTSUPP;
7216 return ops->ndo_get_phys_port_name(dev, name, len);
7217}
7218EXPORT_SYMBOL(dev_get_phys_port_name);
7219
7220/**
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07007221 * dev_change_proto_down - update protocol port state information
7222 * @dev: device
7223 * @proto_down: new value
7224 *
7225 * This info can be used by switch drivers to set the phys state of the
7226 * port.
7227 */
7228int dev_change_proto_down(struct net_device *dev, bool proto_down)
7229{
7230 const struct net_device_ops *ops = dev->netdev_ops;
7231
7232 if (!ops->ndo_change_proto_down)
7233 return -EOPNOTSUPP;
7234 if (!netif_device_present(dev))
7235 return -ENODEV;
7236 return ops->ndo_change_proto_down(dev, proto_down);
7237}
7238EXPORT_SYMBOL(dev_change_proto_down);
7239
Jakub Kicinski118b4aa2017-12-01 15:08:55 -08007240void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
7241 struct netdev_bpf *xdp)
7242{
7243 memset(xdp, 0, sizeof(*xdp));
7244 xdp->command = XDP_QUERY_PROG;
7245
7246 /* Query must always succeed. */
7247 WARN_ON(bpf_op(dev, xdp) < 0);
7248}
7249
7250static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op)
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02007251{
Jakub Kicinskif4e63522017-11-03 13:56:16 -07007252 struct netdev_bpf xdp;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02007253
Jakub Kicinski118b4aa2017-12-01 15:08:55 -08007254 __dev_xdp_query(dev, bpf_op, &xdp);
Martin KaFai Lau58038692017-06-15 17:29:09 -07007255
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02007256 return xdp.prog_attached;
7257}
7258
Jakub Kicinskif4e63522017-11-03 13:56:16 -07007259static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
Jakub Kicinski32d60272017-06-21 18:25:03 -07007260 struct netlink_ext_ack *extack, u32 flags,
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02007261 struct bpf_prog *prog)
7262{
Jakub Kicinskif4e63522017-11-03 13:56:16 -07007263 struct netdev_bpf xdp;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02007264
7265 memset(&xdp, 0, sizeof(xdp));
Jakub Kicinskiee5d0322017-06-21 18:25:04 -07007266 if (flags & XDP_FLAGS_HW_MODE)
7267 xdp.command = XDP_SETUP_PROG_HW;
7268 else
7269 xdp.command = XDP_SETUP_PROG;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02007270 xdp.extack = extack;
Jakub Kicinski32d60272017-06-21 18:25:03 -07007271 xdp.flags = flags;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02007272 xdp.prog = prog;
7273
Jakub Kicinskif4e63522017-11-03 13:56:16 -07007274 return bpf_op(dev, &xdp);
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02007275}
7276
Jakub Kicinskibd0b2e72017-12-01 15:08:57 -08007277static void dev_xdp_uninstall(struct net_device *dev)
7278{
7279 struct netdev_bpf xdp;
7280 bpf_op_t ndo_bpf;
7281
7282 /* Remove generic XDP */
7283 WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
7284
7285 /* Remove from the driver */
7286 ndo_bpf = dev->netdev_ops->ndo_bpf;
7287 if (!ndo_bpf)
7288 return;
7289
7290 __dev_xdp_query(dev, ndo_bpf, &xdp);
7291 if (xdp.prog_attached == XDP_ATTACHED_NONE)
7292 return;
7293
7294 /* Program removal should always succeed */
7295 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL));
7296}
7297
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07007298/**
Brenden Blancoa7862b42016-07-19 12:16:48 -07007299 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
7300 * @dev: device
Jakub Kicinskib5d60982017-05-01 15:53:43 -07007301 * @extack: netlink extended ack
Brenden Blancoa7862b42016-07-19 12:16:48 -07007302 * @fd: new program fd or negative value to clear
Daniel Borkmann85de8572016-11-28 23:16:54 +01007303 * @flags: xdp-related flags
Brenden Blancoa7862b42016-07-19 12:16:48 -07007304 *
7305 * Set or clear a bpf program for a device
7306 */
Jakub Kicinskiddf9f972017-04-30 21:46:46 -07007307int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
7308 int fd, u32 flags)
Brenden Blancoa7862b42016-07-19 12:16:48 -07007309{
7310 const struct net_device_ops *ops = dev->netdev_ops;
7311 struct bpf_prog *prog = NULL;
Jakub Kicinskif4e63522017-11-03 13:56:16 -07007312 bpf_op_t bpf_op, bpf_chk;
Brenden Blancoa7862b42016-07-19 12:16:48 -07007313 int err;
7314
Daniel Borkmann85de8572016-11-28 23:16:54 +01007315 ASSERT_RTNL();
7316
Jakub Kicinskif4e63522017-11-03 13:56:16 -07007317 bpf_op = bpf_chk = ops->ndo_bpf;
7318 if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
Daniel Borkmann0489df92017-05-12 01:04:45 +02007319 return -EOPNOTSUPP;
Jakub Kicinskif4e63522017-11-03 13:56:16 -07007320 if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
7321 bpf_op = generic_xdp_install;
7322 if (bpf_op == bpf_chk)
7323 bpf_chk = generic_xdp_install;
David S. Millerb5cdae32017-04-18 15:36:58 -04007324
Brenden Blancoa7862b42016-07-19 12:16:48 -07007325 if (fd >= 0) {
Jakub Kicinski118b4aa2017-12-01 15:08:55 -08007326 if (bpf_chk && __dev_xdp_attached(dev, bpf_chk))
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02007327 return -EEXIST;
7328 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
Jakub Kicinski118b4aa2017-12-01 15:08:55 -08007329 __dev_xdp_attached(dev, bpf_op))
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02007330 return -EBUSY;
Daniel Borkmann85de8572016-11-28 23:16:54 +01007331
Jakub Kicinski288b3de2017-11-20 15:21:54 -08007332 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
7333 bpf_op == ops->ndo_bpf);
Brenden Blancoa7862b42016-07-19 12:16:48 -07007334 if (IS_ERR(prog))
7335 return PTR_ERR(prog);
Jakub Kicinski441a3302017-11-20 15:21:55 -08007336
7337 if (!(flags & XDP_FLAGS_HW_MODE) &&
7338 bpf_prog_is_dev_bound(prog->aux)) {
7339 NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
7340 bpf_prog_put(prog);
7341 return -EINVAL;
7342 }
Brenden Blancoa7862b42016-07-19 12:16:48 -07007343 }
7344
Jakub Kicinskif4e63522017-11-03 13:56:16 -07007345 err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
Brenden Blancoa7862b42016-07-19 12:16:48 -07007346 if (err < 0 && prog)
7347 bpf_prog_put(prog);
7348
7349 return err;
7350}
Brenden Blancoa7862b42016-07-19 12:16:48 -07007351
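/*
 * Illustrative sketch (editor-added, not part of dev.c): attaching and
 * detaching an XDP program by fd, forcing the generic (skb) hook with
 * XDP_FLAGS_SKB_MODE. The foo_* names are hypothetical; rtnetlink is
 * the in-tree caller of dev_change_xdp_fd().
 */
#if 0
static int foo_attach_xdp(struct net_device *dev, int prog_fd,
			  struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();
	return dev_change_xdp_fd(dev, extack, prog_fd, XDP_FLAGS_SKB_MODE);
}

static int foo_detach_xdp(struct net_device *dev,
			  struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();
	/* a negative fd clears the program */
	return dev_change_xdp_fd(dev, extack, -1, XDP_FLAGS_SKB_MODE);
}
#endif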
7352/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007353 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07007354 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07007355 *
7356 * Returns a suitable unique value for a new device interface
7357 * number. The caller must hold the rtnl semaphore or the
7358 * dev_base_lock to be sure it remains unique.
7359 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07007360static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007361{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00007362 int ifindex = net->ifindex;
tchardingf4563a72017-02-09 17:56:07 +11007363
Linus Torvalds1da177e2005-04-16 15:20:36 -07007364 for (;;) {
7365 if (++ifindex <= 0)
7366 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007367 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00007368 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007369 }
7370}
7371
Linus Torvalds1da177e2005-04-16 15:20:36 -07007372/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08007373static LIST_HEAD(net_todo_list);
Cong Wang200b9162014-05-12 15:11:20 -07007374DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007375
Stephen Hemminger6f05f622007-03-08 20:46:03 -08007376static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007377{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007378 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007379 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007380}
7381
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007382static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007383{
Krishna Kumare93737b2009-12-08 22:26:02 +00007384 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07007385 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007386
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007387 BUG_ON(dev_boot_phase);
7388 ASSERT_RTNL();
7389
Krishna Kumare93737b2009-12-08 22:26:02 +00007390 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007391		/* Some devices call here without ever having registered,
Krishna Kumare93737b2009-12-08 22:26:02 +00007392		 * to unwind a failed initialization. Remove those
7393		 * devices and proceed with the remaining ones.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007394 */
7395 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007396 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
7397 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007398
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007399 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00007400 list_del(&dev->unreg_list);
7401 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007402 }
Eric Dumazet449f4542011-05-19 12:24:16 +00007403 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007404 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00007405 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007406
Octavian Purdila44345722010-12-13 12:44:07 +00007407 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07007408 list_for_each_entry(dev, head, unreg_list)
7409 list_add_tail(&dev->close_list, &close_head);
David S. Miller99c4a262015-03-18 22:52:33 -04007410 dev_close_many(&close_head, true);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007411
Octavian Purdila44345722010-12-13 12:44:07 +00007412 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007413 /* And unlink it from device chain. */
7414 unlist_netdevice(dev);
7415
7416 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007417 }
Eric Dumazet41852492016-08-26 12:50:39 -07007418 flush_all_backlogs();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007419
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007420 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007421
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007422 list_for_each_entry(dev, head, unreg_list) {
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007423 struct sk_buff *skb = NULL;
7424
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007425 /* Shutdown queueing discipline. */
7426 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007427
Jakub Kicinskibd0b2e72017-12-01 15:08:57 -08007428 dev_xdp_uninstall(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007429
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007430		/* Notify protocols that we are about to destroy
tchardingeb13da12017-02-09 17:56:06 +11007431		 * this device. They should clean up all of their state.
7432 */
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007433 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7434
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007435 if (!dev->rtnl_link_ops ||
7436 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Vlad Yasevich3d3ea5a2017-05-27 10:14:34 -04007437 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
Nicolas Dichtel38e01b32018-01-25 15:01:39 +01007438 GFP_KERNEL, NULL, 0);
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007439
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007440 /*
7441 * Flush the unicast and multicast chains
7442 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007443 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00007444 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007445
7446 if (dev->netdev_ops->ndo_uninit)
7447 dev->netdev_ops->ndo_uninit(dev);
7448
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007449 if (skb)
7450 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
Roopa Prabhu56bfa7e2014-05-01 11:40:30 -07007451
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007452 /* Notifier chain MUST detach us all upper devices. */
7453 WARN_ON(netdev_has_any_upper_dev(dev));
David Ahern0f524a82016-10-17 19:15:52 -07007454 WARN_ON(netdev_has_any_lower_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007455
7456 /* Remove entries from kobject tree */
7457 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00007458#ifdef CONFIG_XPS
7459 /* Remove XPS queueing entries */
7460 netif_reset_xps_queues_gt(dev, 0);
7461#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007462 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007463
Eric W. Biederman850a5452011-10-13 22:25:23 +00007464 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007465
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00007466 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007467 dev_put(dev);
7468}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007469
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007470static void rollback_registered(struct net_device *dev)
7471{
7472 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007473
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007474 list_add(&dev->unreg_list, &single);
7475 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00007476 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007477}
7478
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007479static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
7480 struct net_device *upper, netdev_features_t features)
7481{
7482 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7483 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007484 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007485
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007486 for_each_netdev_feature(&upper_disables, feature_bit) {
7487 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007488 if (!(upper->wanted_features & feature)
7489 && (features & feature)) {
7490 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
7491 &feature, upper->name);
7492 features &= ~feature;
7493 }
7494 }
7495
7496 return features;
7497}
7498
7499static void netdev_sync_lower_features(struct net_device *upper,
7500 struct net_device *lower, netdev_features_t features)
7501{
7502 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7503 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007504 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007505
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007506 for_each_netdev_feature(&upper_disables, feature_bit) {
7507 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007508 if (!(features & feature) && (lower->features & feature)) {
7509 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
7510 &feature, lower->name);
7511 lower->wanted_features &= ~feature;
7512 netdev_update_features(lower);
7513
7514 if (unlikely(lower->features & feature))
7515 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
7516 &feature, lower->name);
7517 }
7518 }
7519}
7520
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007521static netdev_features_t netdev_fix_features(struct net_device *dev,
7522 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07007523{
Michał Mirosław57422dc2011-01-22 12:14:12 +00007524 /* Fix illegal checksum combinations */
7525 if ((features & NETIF_F_HW_CSUM) &&
7526 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007527 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00007528 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
7529 }
7530
Herbert Xub63365a2008-10-23 01:11:29 -07007531 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00007532 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007533 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00007534 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07007535 }
7536
Pravin B Shelarec5f0612013-03-07 09:28:01 +00007537 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
7538 !(features & NETIF_F_IP_CSUM)) {
7539 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
7540 features &= ~NETIF_F_TSO;
7541 features &= ~NETIF_F_TSO_ECN;
7542 }
7543
7544 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
7545 !(features & NETIF_F_IPV6_CSUM)) {
7546 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
7547 features &= ~NETIF_F_TSO6;
7548 }
7549
Alexander Duyckb1dc4972016-05-02 09:38:24 -07007550 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
7551 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
7552 features &= ~NETIF_F_TSO_MANGLEID;
7553
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00007554 /* TSO ECN requires that TSO is present as well. */
7555 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
7556 features &= ~NETIF_F_TSO_ECN;
7557
Michał Mirosław212b5732011-02-15 16:59:16 +00007558 /* Software GSO depends on SG. */
7559 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007560 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00007561 features &= ~NETIF_F_GSO;
7562 }
7563
Alexander Duyck802ab552016-04-10 21:45:03 -04007564 /* GSO partial features require GSO partial be set */
7565 if ((features & dev->gso_partial_features) &&
7566 !(features & NETIF_F_GSO_PARTIAL)) {
7567 netdev_dbg(dev,
7568 "Dropping partially supported GSO features since no GSO partial.\n");
7569 features &= ~dev->gso_partial_features;
7570 }
7571
Michael Chanfb1f5f72017-12-16 03:09:40 -05007572 if (!(features & NETIF_F_RXCSUM)) {
7573 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
7574 * successfully merged by hardware must also have the
7575 * checksum verified by hardware. If the user does not
7576 * want to enable RXCSUM, logically, we should disable GRO_HW.
7577 */
7578 if (features & NETIF_F_GRO_HW) {
7579 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
7580 features &= ~NETIF_F_GRO_HW;
7581 }
7582 }
7583
Gal Pressmande8d5ab2018-03-12 11:48:49 +02007584 /* LRO/HW-GRO features cannot be combined with RX-FCS */
7585 if (features & NETIF_F_RXFCS) {
7586 if (features & NETIF_F_LRO) {
7587 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
7588 features &= ~NETIF_F_LRO;
7589 }
7590
7591 if (features & NETIF_F_GRO_HW) {
7592 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
7593 features &= ~NETIF_F_GRO_HW;
7594 }
Gal Pressmane6c6a922018-03-04 14:12:04 +02007595 }
7596
Herbert Xub63365a2008-10-23 01:11:29 -07007597 return features;
7598}
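/*
 * Illustrative sketch (editor's example, not upstream code): the fixups
 * above mean an inconsistent request is silently sanitized. Assuming a
 * device whose NETIF_F_SG bit is off:
 *
 *	dev->wanted_features |= NETIF_F_TSO;
 *	netdev_update_features(dev);
 *	// netdev_fix_features() clears NETIF_F_ALL_TSO and logs
 *	// "Dropping TSO features since no SG feature."
 */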
Herbert Xub63365a2008-10-23 01:11:29 -07007599
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007600int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00007601{
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007602 struct net_device *upper, *lower;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007603 netdev_features_t features;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007604 struct list_head *iter;
Jarod Wilsone7868a82015-11-03 23:09:32 -05007605 int err = -1;
Michał Mirosław5455c692011-02-15 16:59:17 +00007606
Michał Mirosław87267482011-04-12 09:56:38 +00007607 ASSERT_RTNL();
7608
Michał Mirosław5455c692011-02-15 16:59:17 +00007609 features = netdev_get_wanted_features(dev);
7610
7611 if (dev->netdev_ops->ndo_fix_features)
7612 features = dev->netdev_ops->ndo_fix_features(dev, features);
7613
7614 /* driver might be less strict about feature dependencies */
7615 features = netdev_fix_features(dev, features);
7616
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007617	/* some features can't be enabled if they're off on an upper device */
7618 netdev_for_each_upper_dev_rcu(dev, upper, iter)
7619 features = netdev_sync_upper_features(dev, upper, features);
7620
Michał Mirosław5455c692011-02-15 16:59:17 +00007621 if (dev->features == features)
Jarod Wilsone7868a82015-11-03 23:09:32 -05007622 goto sync_lower;
Michał Mirosław5455c692011-02-15 16:59:17 +00007623
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007624 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
7625 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00007626
7627 if (dev->netdev_ops->ndo_set_features)
7628 err = dev->netdev_ops->ndo_set_features(dev, features);
Nikolay Aleksandrov5f8dc332015-11-13 14:54:01 +01007629 else
7630 err = 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00007631
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007632 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00007633 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007634 "set_features() failed (%d); wanted %pNF, left %pNF\n",
7635 err, &features, &dev->features);
Nikolay Aleksandrov17b85d22015-11-17 15:49:06 +01007636 /* return non-0 since some features might have changed and
7637 * it's better to fire a spurious notification than miss it
7638 */
7639 return -1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007640 }
7641
Jarod Wilsone7868a82015-11-03 23:09:32 -05007642sync_lower:
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007643 /* some features must be disabled on lower devices when disabled
7644 * on an upper device (think: bonding master or bridge)
7645 */
7646 netdev_for_each_lower_dev(dev, lower, iter)
7647 netdev_sync_lower_features(dev, lower, features);
7648
Sabrina Dubrocaae847f42017-07-21 12:49:31 +02007649 if (!err) {
7650 netdev_features_t diff = features ^ dev->features;
7651
7652 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
7653 /* udp_tunnel_{get,drop}_rx_info both need
7654 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
7655 * device, or they won't do anything.
7656 * Thus we need to update dev->features
7657 * *before* calling udp_tunnel_get_rx_info,
7658 * but *after* calling udp_tunnel_drop_rx_info.
7659 */
7660 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
7661 dev->features = features;
7662 udp_tunnel_get_rx_info(dev);
7663 } else {
7664 udp_tunnel_drop_rx_info(dev);
7665 }
7666 }
7667
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007668 dev->features = features;
Sabrina Dubrocaae847f42017-07-21 12:49:31 +02007669 }
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007670
Jarod Wilsone7868a82015-11-03 23:09:32 -05007671 return err < 0 ? 0 : 1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007672}
7673
Michał Mirosławafe12cc2011-05-07 03:22:17 +00007674/**
7675 * netdev_update_features - recalculate device features
7676 * @dev: the device to check
7677 *
7678 * Recalculate dev->features set and send notifications if it
7679 * has changed. Should be called after driver or hardware dependent
7680 * conditions might have changed that influence the features.
7681 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007682void netdev_update_features(struct net_device *dev)
7683{
7684 if (__netdev_update_features(dev))
7685 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00007686}
7687EXPORT_SYMBOL(netdev_update_features);
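/*
 * Example (hedged sketch): a driver that loses an offload at runtime,
 * say after a hypothetical firmware reset, re-evaluates under RTNL:
 *
 *	rtnl_lock();
 *	dev->hw_features &= ~NETIF_F_RXCSUM;	// capability gone
 *	netdev_update_features(dev);		// recompute and notify
 *	rtnl_unlock();
 */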
7688
Linus Torvalds1da177e2005-04-16 15:20:36 -07007689/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00007690 * netdev_change_features - recalculate device features
7691 * @dev: the device to check
7692 *
7693 * Recalculate dev->features set and send notifications even
7694 * if they have not changed. Should be called instead of
7695 * netdev_update_features() if also dev->vlan_features might
7696 * have changed to allow the changes to be propagated to stacked
7697 * VLAN devices.
7698 */
7699void netdev_change_features(struct net_device *dev)
7700{
7701 __netdev_update_features(dev);
7702 netdev_features_change(dev);
7703}
7704EXPORT_SYMBOL(netdev_change_features);
7705
7706/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007707 * netif_stacked_transfer_operstate - transfer operstate
7708 * @rootdev: the root or lower level device to transfer state from
7709 * @dev: the device to transfer operstate to
7710 *
7711 * Transfer operational state from root to device. This is normally
7712 * called when a stacking relationship exists between the root
7713 * device and the device (a leaf device).
7714 */
7715void netif_stacked_transfer_operstate(const struct net_device *rootdev,
7716 struct net_device *dev)
7717{
7718 if (rootdev->operstate == IF_OPER_DORMANT)
7719 netif_dormant_on(dev);
7720 else
7721 netif_dormant_off(dev);
7722
Zhang Shengju0575c862017-04-26 17:49:38 +08007723 if (netif_carrier_ok(rootdev))
7724 netif_carrier_on(dev);
7725 else
7726 netif_carrier_off(dev);
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007727}
7728EXPORT_SYMBOL(netif_stacked_transfer_operstate);
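/*
 * Example (illustrative sketch, modelled on how VLAN-style drivers use
 * this helper): mirror the lower device's state from a netdev notifier:
 *
 *	case NETDEV_CHANGE:
 *	case NETDEV_UP:
 *		netif_stacked_transfer_operstate(lower_dev, stacked_dev);
 *		break;
 */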
7729
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007730static int netif_alloc_rx_queues(struct net_device *dev)
7731{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007732 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00007733 struct netdev_rx_queue *rx;
Pankaj Gupta10595902015-01-12 11:41:28 +05307734 size_t sz = count * sizeof(*rx);
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01007735 int err = 0;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007736
Tom Herbertbd25fa72010-10-18 18:00:16 +00007737 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007738
Michal Hockodcda9b02017-07-12 14:36:45 -07007739 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
Michal Hockoda6bc572017-05-08 15:57:31 -07007740 if (!rx)
7741 return -ENOMEM;
7742
Tom Herbertbd25fa72010-10-18 18:00:16 +00007743 dev->_rx = rx;
7744
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01007745 for (i = 0; i < count; i++) {
Tom Herbertfe822242010-11-09 10:47:38 +00007746 rx[i].dev = dev;
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01007747
7748 /* XDP RX-queue setup */
7749 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
7750 if (err < 0)
7751 goto err_rxq_info;
7752 }
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007753 return 0;
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01007754
7755err_rxq_info:
7756 /* Rollback successful reg's and free other resources */
7757 while (i--)
7758 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
Jakub Kicinski141b52a2018-01-10 01:20:01 -08007759 kvfree(dev->_rx);
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01007760 dev->_rx = NULL;
7761 return err;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007762}
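/*
 * The "while (i--)" unwind below is the usual partial-failure idiom:
 * only queues whose xdp_rxq_info_reg() succeeded get unregistered.
 * Generic sketch of the same pattern (setup/teardown are placeholders):
 *
 *	for (i = 0; i < n; i++)
 *		if (setup(i) < 0)
 *			goto unwind;
 *	return 0;
 * unwind:
 *	while (i--)
 *		teardown(i);
 *	return err;
 */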
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01007763
7764static void netif_free_rx_queues(struct net_device *dev)
7765{
7766 unsigned int i, count = dev->num_rx_queues;
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01007767
7768 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
7769 if (!dev->_rx)
7770 return;
7771
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01007772 for (i = 0; i < count; i++)
Jakub Kicinski82aaff22018-01-10 01:20:02 -08007773 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
7774
7775 kvfree(dev->_rx);
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01007776}
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007777
Changli Gaoaa942102010-12-04 02:31:41 +00007778static void netdev_init_one_queue(struct net_device *dev,
7779 struct netdev_queue *queue, void *_unused)
7780{
7781 /* Initialize queue lock */
7782 spin_lock_init(&queue->_xmit_lock);
7783 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
7784 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00007785 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00007786 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00007787#ifdef CONFIG_BQL
7788 dql_init(&queue->dql, HZ);
7789#endif
Changli Gaoaa942102010-12-04 02:31:41 +00007790}
7791
Eric Dumazet60877a32013-06-20 01:15:51 -07007792static void netif_free_tx_queues(struct net_device *dev)
7793{
WANG Cong4cb28972014-06-02 15:55:22 -07007794 kvfree(dev->_tx);
Eric Dumazet60877a32013-06-20 01:15:51 -07007795}
7796
Tom Herberte6484932010-10-18 18:04:39 +00007797static int netif_alloc_netdev_queues(struct net_device *dev)
7798{
7799 unsigned int count = dev->num_tx_queues;
7800 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07007801 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00007802
Eric Dumazetd3397272015-07-06 17:13:26 +02007803 if (count < 1 || count > 0xffff)
7804 return -EINVAL;
Tom Herberte6484932010-10-18 18:04:39 +00007805
Michal Hockodcda9b02017-07-12 14:36:45 -07007806 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
Michal Hockoda6bc572017-05-08 15:57:31 -07007807 if (!tx)
7808 return -ENOMEM;
7809
Tom Herberte6484932010-10-18 18:04:39 +00007810 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00007811
Tom Herberte6484932010-10-18 18:04:39 +00007812 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
7813 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00007814
7815 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00007816}
7817
Denys Vlasenkoa2029242015-05-11 21:17:53 +02007818void netif_tx_stop_all_queues(struct net_device *dev)
7819{
7820 unsigned int i;
7821
7822 for (i = 0; i < dev->num_tx_queues; i++) {
7823 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
tchardingf4563a72017-02-09 17:56:07 +11007824
Denys Vlasenkoa2029242015-05-11 21:17:53 +02007825 netif_tx_stop_queue(txq);
7826 }
7827}
7828EXPORT_SYMBOL(netif_tx_stop_all_queues);
7829
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007830/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007831 * register_netdevice - register a network device
7832 * @dev: device to register
7833 *
7834 * Take a completed network device structure and add it to the kernel
7835 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7836 * chain. 0 is returned on success. A negative errno code is returned
7837 * on a failure to set up the device, or if the name is a duplicate.
7838 *
7839 * Callers must hold the rtnl semaphore. You may want
7840 * register_netdev() instead of this.
7841 *
7842 * BUGS:
7843 * The locking appears insufficient to guarantee two parallel registers
7844 * will not get the same name.
7845 */
7846
7847int register_netdevice(struct net_device *dev)
7848{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007849 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007850 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007851
7852 BUG_ON(dev_boot_phase);
7853 ASSERT_RTNL();
7854
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007855 might_sleep();
7856
Linus Torvalds1da177e2005-04-16 15:20:36 -07007857 /* When net_device's are persistent, this will be fatal. */
7858 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007859 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007860
David S. Millerf1f28aa2008-07-15 00:08:33 -07007861 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07007862 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007863
Gao feng828de4f2012-09-13 20:58:27 +00007864 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00007865 if (ret < 0)
7866 goto out;
7867
Linus Torvalds1da177e2005-04-16 15:20:36 -07007868 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007869 if (dev->netdev_ops->ndo_init) {
7870 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007871 if (ret) {
7872 if (ret > 0)
7873 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08007874 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007875 }
7876 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007877
Patrick McHardyf6469682013-04-19 02:04:27 +00007878 if (((dev->hw_features | dev->features) &
7879 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00007880 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
7881 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
7882 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
7883 ret = -EINVAL;
7884 goto err_uninit;
7885 }
7886
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00007887 ret = -EBUSY;
7888 if (!dev->ifindex)
7889 dev->ifindex = dev_new_index(net);
7890 else if (__dev_get_by_index(net, dev->ifindex))
7891 goto err_uninit;
7892
Michał Mirosław5455c692011-02-15 16:59:17 +00007893 /* Transfer changeable features to wanted_features and enable
7894 * software offloads (GSO and GRO).
7895 */
7896 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00007897 dev->features |= NETIF_F_SOFT_FEATURES;
Sabrina Dubrocad764a122017-07-21 12:49:28 +02007898
7899 if (dev->netdev_ops->ndo_udp_tunnel_add) {
7900 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
7901 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
7902 }
7903
Michał Mirosław14d12322011-02-22 16:52:28 +00007904 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007905
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007906 if (!(dev->flags & IFF_LOOPBACK))
Michał Mirosław34324dc2011-11-15 15:29:55 +00007907 dev->hw_features |= NETIF_F_NOCACHE_COPY;
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007908
Alexander Duyck7f348a62016-04-20 16:51:00 -04007909 /* If IPv4 TCP segmentation offload is supported we should also
7910 * allow the device to enable segmenting the frame with the option
7911 * of ignoring a static IP ID value. This doesn't enable the
7912 * feature itself but allows the user to enable it later.
7913 */
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007914 if (dev->hw_features & NETIF_F_TSO)
7915 dev->hw_features |= NETIF_F_TSO_MANGLEID;
Alexander Duyck7f348a62016-04-20 16:51:00 -04007916 if (dev->vlan_features & NETIF_F_TSO)
7917 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
7918 if (dev->mpls_features & NETIF_F_TSO)
7919 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
7920 if (dev->hw_enc_features & NETIF_F_TSO)
7921 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07007922
Michał Mirosław1180e7d2011-07-14 14:41:11 -07007923 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00007924 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07007925 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00007926
Pravin B Shelaree579672013-03-07 09:28:08 +00007927 /* Make NETIF_F_SG inheritable to tunnel devices.
7928 */
Alexander Duyck802ab552016-04-10 21:45:03 -04007929 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
Pravin B Shelaree579672013-03-07 09:28:08 +00007930
Simon Horman0d89d202013-05-23 21:02:52 +00007931 /* Make NETIF_F_SG inheritable to MPLS.
7932 */
7933 dev->mpls_features |= NETIF_F_SG;
7934
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00007935 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
7936 ret = notifier_to_errno(ret);
7937 if (ret)
7938 goto err_uninit;
7939
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007940 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007941 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007942 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007943 dev->reg_state = NETREG_REGISTERED;
7944
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007945 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00007946
Linus Torvalds1da177e2005-04-16 15:20:36 -07007947 /*
7948 * Default initial state at registry is that the
7949 * device is present.
7950 */
7951
7952 set_bit(__LINK_STATE_PRESENT, &dev->state);
7953
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01007954 linkwatch_init_dev(dev);
7955
Linus Torvalds1da177e2005-04-16 15:20:36 -07007956 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007957 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007958 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04007959 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007960
Jiri Pirko948b3372013-01-08 01:38:25 +00007961 /* If the device has permanent device address, driver should
7962 * set dev_addr and also addr_assign_type should be set to
7963 * NET_ADDR_PERM (default value).
7964 */
7965 if (dev->addr_assign_type == NET_ADDR_PERM)
7966 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7967
Linus Torvalds1da177e2005-04-16 15:20:36 -07007968 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007969 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07007970 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007971 if (ret) {
7972 rollback_registered(dev);
7973 dev->reg_state = NETREG_UNREGISTERED;
7974 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007975 /*
7976 * Prevent userspace races by waiting until the network
7977 * device is fully setup before sending notifications.
7978 */
Patrick McHardya2835762010-02-26 06:34:51 +00007979 if (!dev->rtnl_link_ops ||
7980 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007981 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007982
7983out:
7984 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007985
7986err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007987 if (dev->netdev_ops->ndo_uninit)
7988 dev->netdev_ops->ndo_uninit(dev);
David S. Millercf124db2017-05-08 12:52:56 -04007989 if (dev->priv_destructor)
7990 dev->priv_destructor(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007991 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007992}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007993EXPORT_SYMBOL(register_netdevice);
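/*
 * Example (sketch): rtnl_link newlink handlers already hold RTNL and
 * therefore call register_netdevice() directly; a hypothetical handler:
 *
 *	static int foo_newlink(...)
 *	{
 *		int err = register_netdevice(dev);
 *
 *		if (err)
 *			return err;	// dev is still unregistered
 *		...
 *	}
 */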
Linus Torvalds1da177e2005-04-16 15:20:36 -07007994
7995/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007996 * init_dummy_netdev - init a dummy network device for NAPI
7997 * @dev: device to init
7998 *
7999 * This takes a network device structure and initializes the minimum
8000 * number of fields so it can be used to schedule NAPI polls without
8001 * registering a full blown interface. This is to be used by drivers
8002 * that need to tie several hardware interfaces to a single NAPI
8003 * poll scheduler due to HW limitations.
8004 */
8005int init_dummy_netdev(struct net_device *dev)
8006{
8007 /* Clear everything. Note we don't initialize spinlocks
8008 * as they aren't supposed to be taken by any of the
8009 * NAPI code and this dummy netdev is supposed to be
8010 * only ever used for NAPI polls
8011 */
8012 memset(dev, 0, sizeof(struct net_device));
8013
8014 /* make sure we BUG if trying to hit standard
8015 * register/unregister code path
8016 */
8017 dev->reg_state = NETREG_DUMMY;
8018
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08008019 /* NAPI wants this */
8020 INIT_LIST_HEAD(&dev->napi_list);
8021
8022 /* a dummy interface is started by default */
8023 set_bit(__LINK_STATE_PRESENT, &dev->state);
8024 set_bit(__LINK_STATE_START, &dev->state);
8025
Eric Dumazet29b44332010-10-11 10:22:12 +00008026 /* Note: We don't allocate pcpu_refcnt for dummy devices,
8027 * because users of this 'device' don't need to change
8028 * its refcount.
8029 */
8030
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08008031 return 0;
8032}
8033EXPORT_SYMBOL_GPL(init_dummy_netdev);
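/*
 * Example (sketch): a driver with one interrupt source feeding several
 * hardware channels can hang its NAPI context off a dummy netdev
 * (priv and foo_poll are hypothetical driver pieces):
 *
 *	init_dummy_netdev(&priv->dummy_dev);
 *	netif_napi_add(&priv->dummy_dev, &priv->napi, foo_poll, 64);
 *	napi_enable(&priv->napi);
 */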
8034
8035
8036/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008037 * register_netdev - register a network device
8038 * @dev: device to register
8039 *
8040 * Take a completed network device structure and add it to the kernel
8041 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
8042 * chain. 0 is returned on success. A negative errno code is returned
8043 * on a failure to set up the device, or if the name is a duplicate.
8044 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07008045 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07008046 * and expands the device name if you passed a format string to
8047 * alloc_netdev.
8048 */
8049int register_netdev(struct net_device *dev)
8050{
8051 int err;
8052
Kirill Tkhaib0f3deb2018-03-14 22:17:28 +03008053 if (rtnl_lock_killable())
8054 return -EINTR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008055 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008056 rtnl_unlock();
8057 return err;
8058}
8059EXPORT_SYMBOL(register_netdev);
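/*
 * Example (sketch of the typical probe path; foo_* names hypothetical):
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &foo_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */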
8060
Eric Dumazet29b44332010-10-11 10:22:12 +00008061int netdev_refcnt_read(const struct net_device *dev)
8062{
8063 int i, refcnt = 0;
8064
8065 for_each_possible_cpu(i)
8066 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
8067 return refcnt;
8068}
8069EXPORT_SYMBOL(netdev_refcnt_read);
8070
Ben Hutchings2c530402012-07-10 10:55:09 +00008071/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008072 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00008073 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07008074 *
8075 * This is called when unregistering network devices.
8076 *
8077 * Any protocol or device that holds a reference should register
8078 * for netdevice notification, and cleanup and put back the
8079 * reference if they receive an UNREGISTER event.
8080 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09008081 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008082 */
8083static void netdev_wait_allrefs(struct net_device *dev)
8084{
8085 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00008086 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008087
Eric Dumazete014deb2009-11-17 05:59:21 +00008088 linkwatch_forget_dev(dev);
8089
Linus Torvalds1da177e2005-04-16 15:20:36 -07008090 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00008091 refcnt = netdev_refcnt_read(dev);
8092
8093 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008094 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08008095 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008096
8097 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07008098 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008099
Eric Dumazet748e2d92012-08-22 21:50:59 +00008100 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00008101 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00008102 rtnl_lock();
8103
Linus Torvalds1da177e2005-04-16 15:20:36 -07008104 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
8105 &dev->state)) {
8106 /* We must not have linkwatch events
8107 * pending on unregister. If this
8108 * happens, we simply run the queue
8109 * unscheduled, resulting in a noop
8110 * for this device.
8111 */
8112 linkwatch_run_queue();
8113 }
8114
Stephen Hemminger6756ae42006-03-20 22:23:58 -08008115 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008116
8117 rebroadcast_time = jiffies;
8118 }
8119
8120 msleep(250);
8121
Eric Dumazet29b44332010-10-11 10:22:12 +00008122 refcnt = netdev_refcnt_read(dev);
8123
Linus Torvalds1da177e2005-04-16 15:20:36 -07008124 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008125 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
8126 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008127 warning_time = jiffies;
8128 }
8129 }
8130}
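/*
 * Example (sketch): a holder of a device reference that plays by the
 * rules above releases it from its notifier (foo is hypothetical state):
 *
 *	case NETDEV_UNREGISTER:
 *		if (foo->dev == dev) {
 *			dev_put(dev);
 *			foo->dev = NULL;
 *		}
 *		break;
 */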
8131
8132/* The sequence is:
8133 *
8134 * rtnl_lock();
8135 * ...
8136 * register_netdevice(x1);
8137 * register_netdevice(x2);
8138 * ...
8139 * unregister_netdevice(y1);
8140 * unregister_netdevice(y2);
8141 * ...
8142 * rtnl_unlock();
8143 * free_netdev(y1);
8144 * free_netdev(y2);
8145 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07008146 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07008147 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07008148 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07008149 * without deadlocking with linkwatch via keventd.
8150 * 2) Since we run with the RTNL semaphore not held, we can sleep
8151 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07008152 *
8153 * We must not return until all unregister events added during
8154 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008155 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008156void netdev_run_todo(void)
8157{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07008158 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008159
Linus Torvalds1da177e2005-04-16 15:20:36 -07008160 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07008161 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07008162
8163 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07008164
Eric Dumazet0115e8e2012-08-22 17:19:46 +00008165
8166 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00008167 if (!list_empty(&list))
8168 rcu_barrier();
8169
Linus Torvalds1da177e2005-04-16 15:20:36 -07008170 while (!list_empty(&list)) {
8171 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00008172 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008173 list_del(&dev->todo_list);
8174
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07008175 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008176 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07008177 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07008178 dump_stack();
8179 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008180 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07008181
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07008182 dev->reg_state = NETREG_UNREGISTERED;
8183
8184 netdev_wait_allrefs(dev);
8185
8186 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00008187 BUG_ON(netdev_refcnt_read(dev));
Salam Noureddine7866a622015-01-27 11:35:48 -08008188 BUG_ON(!list_empty(&dev->ptype_all));
8189 BUG_ON(!list_empty(&dev->ptype_specific));
Eric Dumazet33d480c2011-08-11 19:30:52 +00008190 WARN_ON(rcu_access_pointer(dev->ip_ptr));
8191 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
David Ahern330c7272018-02-13 08:52:00 -08008192#if IS_ENABLED(CONFIG_DECNET)
Ilpo Järvinen547b7922008-07-25 21:43:18 -07008193 WARN_ON(dev->dn_ptr);
David Ahern330c7272018-02-13 08:52:00 -08008194#endif
David S. Millercf124db2017-05-08 12:52:56 -04008195 if (dev->priv_destructor)
8196 dev->priv_destructor(dev);
8197 if (dev->needs_free_netdev)
8198 free_netdev(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07008199
Eric W. Biederman50624c92013-09-23 21:19:49 -07008200 /* Report a network device has been unregistered */
8201 rtnl_lock();
8202 dev_net(dev)->dev_unreg_count--;
8203 __rtnl_unlock();
8204 wake_up(&netdev_unregistering_wq);
8205
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07008206 /* Free network device */
8207 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008208 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008209}
8210
Jarod Wilson92566452016-02-01 18:51:04 -05008211/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
8212 * all the same fields in the same order as net_device_stats, with only
8213 * the type differing, but rtnl_link_stats64 may have additional fields
8214 * at the end for newer counters.
Ben Hutchings3cfde792010-07-09 09:11:52 +00008215 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00008216void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
8217 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00008218{
8219#if BITS_PER_LONG == 64
Jarod Wilson92566452016-02-01 18:51:04 -05008220 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
Alban Browaeys9af99592017-07-03 03:20:13 +02008221 memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
Jarod Wilson92566452016-02-01 18:51:04 -05008222 /* zero out counters that only exist in rtnl_link_stats64 */
8223 memset((char *)stats64 + sizeof(*netdev_stats), 0,
8224 sizeof(*stats64) - sizeof(*netdev_stats));
Ben Hutchings3cfde792010-07-09 09:11:52 +00008225#else
Jarod Wilson92566452016-02-01 18:51:04 -05008226 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
Ben Hutchings3cfde792010-07-09 09:11:52 +00008227 const unsigned long *src = (const unsigned long *)netdev_stats;
8228 u64 *dst = (u64 *)stats64;
8229
Jarod Wilson92566452016-02-01 18:51:04 -05008230 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00008231 for (i = 0; i < n; i++)
8232 dst[i] = src[i];
Jarod Wilson92566452016-02-01 18:51:04 -05008233 /* zero out counters that only exist in rtnl_link_stats64 */
8234 memset((char *)stats64 + n * sizeof(u64), 0,
8235 sizeof(*stats64) - n * sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00008236#endif
8237}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00008238EXPORT_SYMBOL(netdev_stats_to_stats64);
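/*
 * Example (sketch): a driver that only maintains the legacy
 * net_device_stats in dev->stats can still provide 64-bit stats:
 *
 *	static void foo_get_stats64(struct net_device *dev,
 *				    struct rtnl_link_stats64 *storage)
 *	{
 *		netdev_stats_to_stats64(storage, &dev->stats);
 *	}
 */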
Ben Hutchings3cfde792010-07-09 09:11:52 +00008239
Eric Dumazetd83345a2009-11-16 03:36:51 +00008240/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08008241 * dev_get_stats - get network device statistics
8242 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07008243 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08008244 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00008245 * Get network statistics from device. Return @storage.
8246 * The device driver may provide its own method by setting
8247 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
8248 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08008249 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00008250struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
8251 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00008252{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08008253 const struct net_device_ops *ops = dev->netdev_ops;
8254
Eric Dumazet28172732010-07-07 14:58:56 -07008255 if (ops->ndo_get_stats64) {
8256 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00008257 ops->ndo_get_stats64(dev, storage);
8258 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00008259 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00008260 } else {
8261 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07008262 }
Eric Dumazet6f64ec72017-06-27 07:02:20 -07008263 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
8264 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
8265 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
Eric Dumazet28172732010-07-07 14:58:56 -07008266 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07008267}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08008268EXPORT_SYMBOL(dev_get_stats);
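/*
 * Example (sketch): callers normally pass stack storage:
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: %llu RX packets\n", dev->name, stats.rx_packets);
 */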
Rusty Russellc45d2862007-03-28 14:29:08 -07008269
Eric Dumazet24824a02010-10-02 06:11:55 +00008270struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07008271{
Eric Dumazet24824a02010-10-02 06:11:55 +00008272 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07008273
Eric Dumazet24824a02010-10-02 06:11:55 +00008274#ifdef CONFIG_NET_CLS_ACT
8275 if (queue)
8276 return queue;
8277 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
8278 if (!queue)
8279 return NULL;
8280 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08008281 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
Eric Dumazet24824a02010-10-02 06:11:55 +00008282 queue->qdisc_sleeping = &noop_qdisc;
8283 rcu_assign_pointer(dev->ingress_queue, queue);
8284#endif
8285 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07008286}
8287
Eric Dumazet2c60db02012-09-16 09:17:26 +00008288static const struct ethtool_ops default_ethtool_ops;
8289
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00008290void netdev_set_default_ethtool_ops(struct net_device *dev,
8291 const struct ethtool_ops *ops)
8292{
8293 if (dev->ethtool_ops == &default_ethtool_ops)
8294 dev->ethtool_ops = ops;
8295}
8296EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
8297
Eric Dumazet74d332c2013-10-30 13:10:44 -07008298void netdev_freemem(struct net_device *dev)
8299{
8300 char *addr = (char *)dev - dev->padded;
8301
WANG Cong4cb28972014-06-02 15:55:22 -07008302 kvfree(addr);
Eric Dumazet74d332c2013-10-30 13:10:44 -07008303}
8304
Linus Torvalds1da177e2005-04-16 15:20:36 -07008305/**
tcharding722c9a02017-02-09 17:56:04 +11008306 * alloc_netdev_mqs - allocate network device
8307 * @sizeof_priv: size of private data to allocate space for
8308 * @name: device name format string
8309 * @name_assign_type: origin of device name
8310 * @setup: callback to initialize device
8311 * @txqs: the number of TX subqueues to allocate
8312 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07008313 *
tcharding722c9a02017-02-09 17:56:04 +11008314 * Allocates a struct net_device with private data area for driver use
8315 * and performs basic initialization. Also allocates subqueue structs
8316 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008317 */
Tom Herbert36909ea2011-01-09 19:36:31 +00008318struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02008319 unsigned char name_assign_type,
Tom Herbert36909ea2011-01-09 19:36:31 +00008320 void (*setup)(struct net_device *),
8321 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008322{
Linus Torvalds1da177e2005-04-16 15:20:36 -07008323 struct net_device *dev;
Alexey Dobriyan52a59bd2017-09-21 23:33:29 +03008324 unsigned int alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00008325 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008326
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07008327 BUG_ON(strlen(name) >= sizeof(dev->name));
8328
Tom Herbert36909ea2011-01-09 19:36:31 +00008329 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008330 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00008331 return NULL;
8332 }
8333
Tom Herbert36909ea2011-01-09 19:36:31 +00008334 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008335 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00008336 return NULL;
8337 }
Tom Herbert36909ea2011-01-09 19:36:31 +00008338
David S. Millerfd2ea0a2008-07-17 01:56:23 -07008339 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07008340 if (sizeof_priv) {
8341 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00008342 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07008343 alloc_size += sizeof_priv;
8344 }
8345 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00008346 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008347
Michal Hockodcda9b02017-07-12 14:36:45 -07008348 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
Joe Perches62b59422013-02-04 16:48:16 +00008349 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008350 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008351
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00008352 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008353 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00008354
Eric Dumazet29b44332010-10-11 10:22:12 +00008355 dev->pcpu_refcnt = alloc_percpu(int);
8356 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07008357 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00008358
Linus Torvalds1da177e2005-04-16 15:20:36 -07008359 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00008360 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008361
Jiri Pirko22bedad32010-04-01 21:22:57 +00008362 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00008363 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00008364
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09008365 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008366
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07008367 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00008368 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008369
Herbert Xud565b0a2008-12-15 23:38:52 -08008370 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00008371 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07008372 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00008373 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02008374 INIT_LIST_HEAD(&dev->adj_list.upper);
8375 INIT_LIST_HEAD(&dev->adj_list.lower);
Salam Noureddine7866a622015-01-27 11:35:48 -08008376 INIT_LIST_HEAD(&dev->ptype_all);
8377 INIT_LIST_HEAD(&dev->ptype_specific);
Jiri Kosina59cc1f62016-08-10 11:05:15 +02008378#ifdef CONFIG_NET_SCHED
8379 hash_init(dev->qdisc_hash);
8380#endif
Eric Dumazet02875872014-10-05 18:38:35 -07008381 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008382 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08008383
Phil Suttera8131042016-02-17 15:37:43 +01008384 if (!dev->tx_queue_len) {
Phil Sutterf84bb1e2015-08-27 21:21:36 +02008385 dev->priv_flags |= IFF_NO_QUEUE;
Jesper Dangaard Brouer11597082016-11-03 14:56:06 +01008386 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
Phil Suttera8131042016-02-17 15:37:43 +01008387 }
Phil Sutter906470c2015-08-18 10:30:48 +02008388
David S. Miller8d3bdbd2011-02-08 15:02:50 -08008389 dev->num_tx_queues = txqs;
8390 dev->real_num_tx_queues = txqs;
8391 if (netif_alloc_netdev_queues(dev))
8392 goto free_all;
8393
David S. Miller8d3bdbd2011-02-08 15:02:50 -08008394 dev->num_rx_queues = rxqs;
8395 dev->real_num_rx_queues = rxqs;
8396 if (netif_alloc_rx_queues(dev))
8397 goto free_all;
David S. Miller8d3bdbd2011-02-08 15:02:50 -08008398
Linus Torvalds1da177e2005-04-16 15:20:36 -07008399 strcpy(dev->name, name);
Tom Gundersenc835a672014-07-14 16:37:24 +02008400 dev->name_assign_type = name_assign_type;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00008401 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00008402 if (!dev->ethtool_ops)
8403 dev->ethtool_ops = &default_ethtool_ops;
Pablo Neirae687ad62015-05-13 18:19:38 +02008404
8405 nf_hook_ingress_init(dev);
8406
Linus Torvalds1da177e2005-04-16 15:20:36 -07008407 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00008408
David S. Miller8d3bdbd2011-02-08 15:02:50 -08008409free_all:
8410 free_netdev(dev);
8411 return NULL;
8412
Eric Dumazet29b44332010-10-11 10:22:12 +00008413free_pcpu:
8414 free_percpu(dev->pcpu_refcnt);
Eric Dumazet74d332c2013-10-30 13:10:44 -07008415free_dev:
8416 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00008417 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008418}
Tom Herbert36909ea2011-01-09 19:36:31 +00008419EXPORT_SYMBOL(alloc_netdev_mqs);
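/*
 * Example (sketch): alloc_netdev() and alloc_etherdev_mq() are wrappers
 * around this; a direct multiqueue allocation might look like
 * (foo_priv/foo_setup hypothetical; "foo%d" is expanded at registration):
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       NET_NAME_UNKNOWN, foo_setup, 8, 8);
 */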
Linus Torvalds1da177e2005-04-16 15:20:36 -07008420
8421/**
tcharding722c9a02017-02-09 17:56:04 +11008422 * free_netdev - free network device
8423 * @dev: device
Linus Torvalds1da177e2005-04-16 15:20:36 -07008424 *
tcharding722c9a02017-02-09 17:56:04 +11008425 * This function does the last stage of destroying an allocated device
8426 * interface. The reference to the device object is released. If this
8427 * is the last reference then it will be freed. Must be called in process
8428 * context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008429 */
8430void free_netdev(struct net_device *dev)
8431{
Herbert Xud565b0a2008-12-15 23:38:52 -08008432 struct napi_struct *p, *n;
8433
Eric Dumazet93d05d42015-11-18 06:31:03 -08008434 might_sleep();
Eric Dumazet60877a32013-06-20 01:15:51 -07008435 netif_free_tx_queues(dev);
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01008436 netif_free_rx_queues(dev);
David S. Millere8a04642008-07-17 00:34:19 -07008437
Eric Dumazet33d480c2011-08-11 19:30:52 +00008438 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00008439
Jiri Pirkof001fde2009-05-05 02:48:28 +00008440 /* Flush device addresses */
8441 dev_addr_flush(dev);
8442
Herbert Xud565b0a2008-12-15 23:38:52 -08008443 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
8444 netif_napi_del(p);
8445
Eric Dumazet29b44332010-10-11 10:22:12 +00008446 free_percpu(dev->pcpu_refcnt);
8447 dev->pcpu_refcnt = NULL;
8448
Stephen Hemminger3041a062006-05-26 13:25:24 -07008449 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008450 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07008451 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008452 return;
8453 }
8454
8455 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
8456 dev->reg_state = NETREG_RELEASED;
8457
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07008458 /* will free via device release */
8459 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008460}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008461EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09008462
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008463/**
8464 * synchronize_net - Synchronize with packet receive processing
8465 *
8466 * Wait for packets currently being received to be done.
8467 * Does not block later packets from starting.
8468 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09008469void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008470{
8471 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00008472 if (rtnl_is_locked())
8473 synchronize_rcu_expedited();
8474 else
8475 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008476}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008477EXPORT_SYMBOL(synchronize_net);
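/*
 * Example (sketch of the retract-then-free pattern this backs, close to
 * what netdev_rx_handler_unregister() does):
 *
 *	RCU_INIT_POINTER(dev->rx_handler, NULL);
 *	synchronize_net();	// all in-flight receives have finished
 *	kfree(old_handler_data);	// now safe to free
 */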
Linus Torvalds1da177e2005-04-16 15:20:36 -07008478
8479/**
Eric Dumazet44a08732009-10-27 07:03:04 +00008480 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07008481 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00008482 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08008483 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008484 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08008485 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00008486 * If head not NULL, device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008487 *
8488 * Callers must hold the rtnl semaphore. You may want
8489 * unregister_netdev() instead of this.
8490 */
8491
Eric Dumazet44a08732009-10-27 07:03:04 +00008492void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008493{
Herbert Xua6620712007-12-12 19:21:56 -08008494 ASSERT_RTNL();
8495
Eric Dumazet44a08732009-10-27 07:03:04 +00008496 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00008497 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00008498 } else {
8499 rollback_registered(dev);
8500 /* Finish processing unregister after unlock */
8501 net_set_todo(dev);
8502 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008503}
Eric Dumazet44a08732009-10-27 07:03:04 +00008504EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008505
8506/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008507 * unregister_netdevice_many - unregister many devices
8508 * @head: list of devices
Eric Dumazet87757a92014-06-06 06:44:03 -07008509 *
8510 * Note: As most callers use a stack-allocated list_head,
8511 * we force a list_del() to make sure the stack won't be corrupted later.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008512 */
8513void unregister_netdevice_many(struct list_head *head)
8514{
8515 struct net_device *dev;
8516
8517 if (!list_empty(head)) {
8518 rollback_registered_many(head);
8519 list_for_each_entry(dev, head, unreg_list)
8520 net_set_todo(dev);
Eric Dumazet87757a92014-06-06 06:44:03 -07008521 list_del(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008522 }
8523}
Eric Dumazet63c80992009-10-27 07:06:49 +00008524EXPORT_SYMBOL(unregister_netdevice_many);
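/*
 * Example (sketch): batch teardown so the unregister notifications and
 * synchronize_net() in rollback_registered_many() are paid once, not per
 * device (foo_devices/fdev are a hypothetical driver list):
 *
 *	LIST_HEAD(kill_list);
 *
 *	list_for_each_entry(fdev, &foo_devices, list)
 *		unregister_netdevice_queue(fdev->ndev, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 */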
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008525
8526/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008527 * unregister_netdev - remove device from the kernel
8528 * @dev: device
8529 *
8530 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08008531 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008532 *
8533 * This is just a wrapper for unregister_netdevice that takes
8534 * the rtnl semaphore. In general you want to use this and not
8535 * unregister_netdevice.
8536 */
8537void unregister_netdev(struct net_device *dev)
8538{
8539 rtnl_lock();
8540 unregister_netdevice(dev);
8541 rtnl_unlock();
8542}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008543EXPORT_SYMBOL(unregister_netdev);
8544
Eric W. Biedermance286d32007-09-12 13:53:49 +02008545/**
8546 * dev_change_net_namespace - move device to different nethost namespace
8547 * dev_change_net_namespace - move device to a different network namespace
8548 * @net: network namespace
8549 * @pat: If not NULL name pattern to try if the current device name
8550 * is already taken in the destination network namespace.
8551 *
8552 * This function shuts down a device interface and moves it
8553 * to a new network namespace. On success 0 is returned, on
8554 * a failure a negative errno code is returned.
8555 *
8556 * Callers must hold the rtnl semaphore.
8557 */
8558
8559int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
8560{
Nicolas Dichtel38e01b32018-01-25 15:01:39 +01008561 int err, new_nsid, new_ifindex;
Eric W. Biedermance286d32007-09-12 13:53:49 +02008562
8563 ASSERT_RTNL();
8564
8565 /* Don't allow namespace local devices to be moved. */
8566 err = -EINVAL;
8567 if (dev->features & NETIF_F_NETNS_LOCAL)
8568 goto out;
8569
8570 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02008571 if (dev->reg_state != NETREG_REGISTERED)
8572 goto out;
8573
8574 /* Get out if there is nothing to do */
8575 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09008576 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02008577 goto out;
8578
8579 /* Pick the destination device name, and ensure
8580 * we can use it in the destination network namespace.
8581 */
8582 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00008583 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008584 /* We get here if we can't use the current device name */
8585 if (!pat)
8586 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00008587 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02008588 goto out;
8589 }
8590
8591 /*
8592 * And now a mini version of register_netdevice unregister_netdevice.
8593 */
8594
8595 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07008596 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008597
8598 /* And unlink it from device chain */
8599 err = -ENODEV;
8600 unlist_netdevice(dev);
8601
8602 synchronize_net();
8603
8604 /* Shutdown queueing discipline. */
8605 dev_shutdown(dev);
8606
8607 /* Notify protocols, that we are about to destroy
tchardingeb13da12017-02-09 17:56:06 +11008608 * this device. They should clean all the things.
8609 *
8610 * Note that dev->reg_state stays at NETREG_REGISTERED.
8611 * This is wanted because this way 8021q and macvlan know
8612 * the device is just moving and can keep their slaves up.
8613 */
Eric W. Biedermance286d32007-09-12 13:53:49 +02008614 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00008615 rcu_barrier();
Nicolas Dichtel38e01b32018-01-25 15:01:39 +01008616
Nicolas Dichtelc36ac8e2018-01-25 15:01:38 +01008617 new_nsid = peernet2id_alloc(dev_net(dev), net);
Nicolas Dichtel38e01b32018-01-25 15:01:39 +01008618 /* If there is an ifindex conflict assign a new one */
8619 if (__dev_get_by_index(net, dev->ifindex))
8620 new_ifindex = dev_new_index(net);
8621 else
8622 new_ifindex = dev->ifindex;
8623
8624 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
8625 new_ifindex);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008626
8627 /*
8628 * Flush the unicast and multicast chains
8629 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00008630 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00008631 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008632
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008633 /* Send a netdev-removed uevent to the old namespace */
8634 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04008635 netdev_adjacent_del_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008636
Eric W. Biedermance286d32007-09-12 13:53:49 +02008637 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09008638 dev_net_set(dev, net);
Nicolas Dichtel38e01b32018-01-25 15:01:39 +01008639 dev->ifindex = new_ifindex;
Eric W. Biedermance286d32007-09-12 13:53:49 +02008640
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008641 /* Send a netdev-add uevent to the new namespace */
8642 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04008643 netdev_adjacent_add_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008644
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008645 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07008646 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008647 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008648
8649 /* Add the device back in the hashes */
8650 list_netdevice(dev);
8651
8652 /* Notify protocols, that a new device appeared. */
8653 call_netdevice_notifiers(NETDEV_REGISTER, dev);
8654
Eric W. Biedermand90a9092009-12-12 22:11:15 +00008655 /*
8656 * Prevent userspace races by waiting until the network
8657 * device is fully setup before sending notifications.
8658 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07008659 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00008660
Eric W. Biedermance286d32007-09-12 13:53:49 +02008661 synchronize_net();
8662 err = 0;
8663out:
8664 return err;
8665}
Johannes Berg463d0182009-07-14 00:33:35 +02008666EXPORT_SYMBOL_GPL(dev_change_net_namespace);
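/*
 * Example (sketch): moving a device into a namespace named by userspace,
 * roughly what the RTM_NEWLINK IFLA_NET_NS_PID path does:
 *
 *	net = get_net_ns_by_pid(pid);	// or get_net_ns_by_fd(fd)
 *	if (!IS_ERR(net)) {
 *		err = dev_change_net_namespace(dev, net, "eth%d");
 *		put_net(net);
 *	}
 */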
Eric W. Biedermance286d32007-09-12 13:53:49 +02008667
static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception:
	 * process_backlog() must be called by the CPU owning the percpu
	 * backlog.  We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

#ifdef CONFIG_RPS
	remsd = oldsd->rps_ipi_list;
	oldsd->rps_ipi_list = NULL;
#endif
	/* Send out pending IPIs on the offline CPU */
	net_rps_send_ipi(remsd);

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}

	return 0;
}

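/*
 * Editor's sketch, not part of the original file: dev_cpu_dead() is
 * registered from net_dev_init() below via cpuhp_setup_state_nocalls().
 * An analogous teardown hook for a hypothetical subsystem would use a
 * dynamically allocated PREPARE-stage state; all example_* names are
 * made up.
 */
static int __maybe_unused example_cpu_dead(unsigned int oldcpu)
{
	/* Drain the hypothetical subsystem's per-CPU state of @oldcpu. */
	return 0;
}

static int __init __maybe_unused example_hotplug_init(void)
{
	int ret;

	/* Dynamic states return the allocated state number (> 0) on success. */
	ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, "example:dead",
					NULL, example_cpu_dead);
	return ret < 0 ? ret : 0;
}
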
/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all.  Will not
 * enable anything that is off in @mask.  Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);

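/*
 * Editor's sketch, not part of the original file: how a bonding-style
 * master might fold each slave's features into its own, in the spirit
 * of bond_compute_features().  The example_* helper and the use of
 * dev_list as the iteration hook are illustrative assumptions.
 */
static netdev_features_t __maybe_unused
example_master_features(struct list_head *slaves, netdev_features_t mask)
{
	netdev_features_t all = mask;	/* start from everything allowed */
	struct net_device *slave;

	list_for_each_entry(slave, slaves, dev_list)
		all = netdev_increment_features(all, slave->features, mask);

	return all;
}
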
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

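/*
 * Editor's sketch, not part of the original file: how a bucket of the
 * hash built above is typically walked, modelled on __dev_get_by_name()
 * earlier in this file.  The open-coded bucket math stands in for the
 * file's dev_name_hash() helper; example_* is hypothetical.
 */
static struct net_device *__maybe_unused
example_lookup_by_name(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
	struct hlist_head *head = &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
	struct net_device *dev;

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
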
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

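/*
 * Editor's sketch, not part of the original file: typical diagnostic use
 * of netdev_drivername(); the example_* wrapper is hypothetical.
 */
static void __maybe_unused example_report_driver(const struct net_device *dev)
{
	netdev_info(dev, "handled by driver %s\n", netdev_drivername(dev));
}
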
static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

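/*
 * Editor's note, not part of the original file: the helpers generated
 * above are used like printk() but prefix the driver, bus id and
 * interface name, e.g.
 *
 *	netdev_warn(dev, "link flapped %d times\n", count);
 *
 * which, when the device has a parent, is emitted through
 * dev_printk_emit() as "<driver> <bus-id> <ifname>: link flapped ...".
 */
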
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

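/*
 * Editor's note, not part of the original file: netdev_net_ops is
 * registered from net_dev_init() below; register_pernet_subsys() then
 * runs netdev_init() for each network namespace as it is created and
 * netdev_exit() as it is torn down.
 */
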
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

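/*
 * Editor's note, not part of the original file: the DEFINE_WAIT_FUNC()/
 * wait_woken() pairing above is race-free by construction - a wake-up
 * arriving between __rtnl_unlock() and wait_woken() is recorded in the
 * wait entry, so wait_woken() returns immediately rather than sleeping
 * through it.
 */
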
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network device
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too.  Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
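
/*
 * Editor's note, not part of the original file: subsys_initcall() places
 * net_dev_init() at the subsystem initcall level, so it runs once during
 * boot, before the device- and module-level initcalls that register any
 * network devices.
 */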