/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call per packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix: Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example, register_netdevice() and unregister_netdevice(),
 * which must be called with the rtnl semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
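
/*
 * Illustrative sketch (not part of this file): the two read-side idioms
 * the comment above describes; the caller code and use() are hypothetical.
 *
 *	Pure reader holding dev_base_lock:
 *
 *		read_lock(&dev_base_lock);
 *		for_each_netdev(net, dev)
 *			use(dev);
 *		read_unlock(&dev_base_lock);
 *
 *	Lockless reader inside an RCU read-side section:
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			use(dev);
 *		rcu_read_unlock();
 *
 * Writers take rtnl_lock() and additionally hold dev_base_lock for writing
 * around the actual updates, as list_netdevice() and unlist_netdevice()
 * below do.
 */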

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and checking of protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	This is true now; do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet is
 *	cloned and should be copied-on-write; it would change it in place
 *	and subsequent readers would get a broken packet.
 *						--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
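
/*
 * Illustrative sketch (not part of this file): registering a tap that sees
 * every received frame. The handler name and its body are hypothetical; the
 * struct fields and the add/remove calls match this API.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		(taps receive their own clone to free)
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),	(.dev left NULL: all devices)
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);		(e.g. from module init)
 *	dev_remove_pack(&my_tap);	(from module exit; may sleep)
 */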

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
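
/*
 * Illustrative sketch (not part of this file): how a protocol attaches its
 * GRO/GSO callbacks, modelled on the IPv4 offload registration. The callback
 * implementations are elided and their names are hypothetical.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = my_gso_segment,
 *			.gro_receive = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 *
 * dev_add_offload() keeps offload_base sorted by ->priority, so entries with
 * a lower priority value are walked first on lookup.
 */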

/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
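
/*
 * Example (assumed semantics of the parser above): a kernel command line
 * entry of
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * records irq 9 and I/O base 0x300 for "eth0"; netdev_boot_setup_check()
 * then copies those values into the device of that name before probing.
 */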

/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under the RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
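
/*
 * Illustrative sketch (not part of this file): the two styles of name-based
 * lookup; "net" and the uses of "dev" are hypothetical caller code.
 *
 *	Refcounted, callable from any context:
 *
 *		struct net_device *dev = dev_get_by_name(net, "eth0");
 *		if (dev) {
 *			use(dev);
 *			dev_put(dev);	(drop the reference when done)
 *		}
 *
 *	Lockless, valid only inside the RCU read-side section:
 *
 *		rcu_read_lock();
 *		dev = dev_get_by_name_rcu(net, "eth0");
 *		if (dev)
 *			use(dev);
 *		rcu_read_unlock();
 */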

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put() to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);
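
/*
 * Illustrative sketch (not part of this file): mapping a NAPI ID reported
 * by busy polling (e.g. the SO_INCOMING_NAPI_ID socket option) back to its
 * device; the caller code here is hypothetical.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_napi_id(napi_id);
 *	if (dev)
 *		use(dev);	(no reference held; RCU section only)
 *	rcu_read_unlock();
 */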

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns %NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
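
/*
 * Illustrative sketch (not part of this file): looking up an Ethernet
 * device by MAC address; "mac" is a hypothetical ETH_ALEN byte buffer.
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		use(dev);	(no reference held; RCU section only)
 *	rcu_read_unlock();
 */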

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns %NULL if a
 *	device is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work. We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
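
/*
 * For example, "eth0" and "wlan%d" pass this check, while "", ".", "..",
 * names of IFNAMSIZ or more characters, and names containing '/', ':' or
 * whitespace are all rejected.
 */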

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - e.g. "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most
 *	platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - e.g. "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most
 *	platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
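
/*
 * Illustrative sketch (not part of this file): a driver typically picks a
 * unit number before registration; "dummy%d" is just an example format.
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto out;	(invalid format or name space exhausted)
 *
 * On success dev->name holds the first free instance ("dummy0", "dummy1",
 * ...) and err is the unit number that was chosen.
 */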
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001135
Gao feng828de4f2012-09-13 20:58:27 +00001136static int dev_alloc_name_ns(struct net *net,
1137 struct net_device *dev,
1138 const char *name)
Octavian Purdilad9031022009-11-18 02:36:59 +00001139{
Gao feng828de4f2012-09-13 20:58:27 +00001140 char buf[IFNAMSIZ];
1141 int ret;
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001142
Gao feng828de4f2012-09-13 20:58:27 +00001143 ret = __dev_alloc_name(net, name, buf);
1144 if (ret >= 0)
1145 strlcpy(dev->name, buf, IFNAMSIZ);
1146 return ret;
1147}
1148
1149static int dev_get_valid_name(struct net *net,
1150 struct net_device *dev,
1151 const char *name)
1152{
1153 BUG_ON(!net);
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001154
Octavian Purdilad9031022009-11-18 02:36:59 +00001155 if (!dev_valid_name(name))
1156 return -EINVAL;
1157
Jiri Pirko1c5cae82011-04-30 01:21:32 +00001158 if (strchr(name, '%'))
Gao feng828de4f2012-09-13 20:58:27 +00001159 return dev_alloc_name_ns(net, dev, name);
Octavian Purdilad9031022009-11-18 02:36:59 +00001160 else if (__dev_get_by_name(net, name))
1161 return -EEXIST;
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001162 else if (dev->name != name)
1163 strlcpy(dev->name, name, IFNAMSIZ);
Octavian Purdilad9031022009-11-18 02:36:59 +00001164
1165 return 0;
1166}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167
1168/**
1169 * dev_change_name - change name of a device
1170 * @dev: device
1171 * @newname: name (or format string) must be at least IFNAMSIZ
1172 *
1173 * Change name of a device, can pass format strings "eth%d".
1174 * for wildcarding.
1175 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

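/*
 * Example (editorial sketch, not part of the original file): renaming a
 * device from process context.  dev_change_name() must run under RTNL and
 * the device must be down (it returns -EBUSY on an IFF_UP device).  The
 * "target" pointer and the "wan%d" pattern are illustrative.
 *
 *	rtnl_lock();
 *	err = dev_change_name(target, "wan%d");
 *	rtnl_unlock();
 */
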
/**
 * dev_set_alias - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from @alias
 *
 * Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;
	memcpy(dev->ifalias, alias, len);
	dev->ifalias[len] = 0;

	return len;
}


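/*
 * Example (editorial sketch): setting and clearing an alias with RTNL
 * held.  The alias text and its length are illustrative; on success the
 * function returns the stored length, not 0.
 *
 *	rtnl_lock();
 *	ret = dev_set_alias(dev, "uplink to core switch", 21);
 *	...
 *	ret = dev_set_alias(dev, NULL, 0);	// len == 0 clears the alias
 *	rtnl_unlock();
 */
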
/**
 * netdev_features_change - device changes features
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 * netdev_state_change - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

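/*
 * Example (editorial sketch): a virtualization driver announcing itself
 * after a VM migration so switches relearn the MAC.  netdev_notify_peers()
 * takes RTNL itself, so call it from a context that does not already hold
 * it, e.g. a workqueue.  "struct mydrv" and its fields are illustrative.
 *
 *	static void mydrv_migration_work(struct work_struct *work)
 *	{
 *		struct mydrv *priv = container_of(work, struct mydrv,
 *						  migration_work);
 *
 *		netdev_notify_peers(priv->netdev);
 *	}
 */
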
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

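/*
 * Example (editorial sketch): bringing an interface up and back down from
 * kernel code, the in-kernel equivalent of "ip link set dev ... up/down".
 * Both calls require RTNL.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	...
 *	dev_close(dev);
 *	rtnl_unlock();
 */
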
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device specific close. This cannot fail.
		 * Only if device is UP
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}

int dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}

	return 0;
}
EXPORT_SYMBOL(dev_close_many);

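/*
 * Example (editorial sketch): batching several downs on one close_list so
 * the deactivate/synchronize work in __dev_close_many() is paid once for
 * the whole set.  "a" and "b" are illustrative devices; caller holds RTNL.
 *
 *	LIST_HEAD(close_head);
 *
 *	list_add(&a->close_list, &close_head);
 *	list_add(&b->close_list, &close_head);
 *	dev_close_many(&close_head, true);
 */
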
/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device. Must be
 * called under RTNL. This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed to the
 * new notifier so that it gets a race-free view of the network device
 * list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

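/*
 * Example (editorial sketch): a minimal notifier.  Because registration
 * replays NETDEV_REGISTER/NETDEV_UP for already-existing devices, the
 * handler sees every device regardless of load order.  The "my_*" names
 * are illustrative.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&my_nb);
 *
 * Pair with unregister_netdevice_notifier(&my_nb) on module exit.
 */
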
/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 * call_netdevice_notifiers_info - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 * @info: notifier information data
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static struct static_key egress_needed __read_mostly;

void net_inc_egress_queue(void)
{
	static_key_slow_inc(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_key_slow_dec(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_key_enable(&netstamp_needed);
	else
		static_key_disable(&netstamp_needed);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_inc(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_dec(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp)		\
			__net_timestamp(SKB);		\
	}						\

bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

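/*
 * Example (editorial sketch): a veth-style pair device handing frames to
 * its peer from ndo_start_xmit.  "struct pair_priv" and its "peer" field
 * are illustrative.  Note the skb is consumed either way (dropped packets
 * are freed).
 *
 *	static netdev_tx_t pair_xmit(struct sk_buff *skb,
 *				     struct net_device *dev)
 *	{
 *		struct pair_priv *priv = netdev_priv(dev);
 *
 *		if (dev_forward_skb(priv->peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */
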
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 * Support routine. Sends outgoing frames to any network
 * taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, TC0
 * is invalid and nothing can be done, so priority mappings are disabled.
 * It is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		return -1;
	}

	return 0;
}

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}

static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	int num_tc = dev->num_tc ? : 1;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			/* index by tci, not cpu: the two only coincide when
			 * num_tc == 1 (the original passed cpu here; fixed
			 * upstream later)
			 */
			if (!remove_xps_queue(dev_maps, tci, j))
				break;
		}

		active |= i < 0;
	}

	return active;
}

static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu)
		active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
					       offset, count);

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = offset + (count - 1); count--; i--)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, cpu, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_cpu_and(cpu, cpu_online_mask, mask) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = cpu * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		/* copy maps belonging to foreign traffic classes */
		for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* We need to explicitly update tci as previous loop
		 * could break out early if dev_maps is NULL.
		 */
		tci = cpu * num_tc + tc;
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* copy maps belonging to foreign traffic classes */
		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		for (i = tc, tci = cpu * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->cpu_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
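
/*
 * Example (editorial sketch): steering transmit queue 0 to CPUs 0-1 from a
 * driver's setup path.  Illustrative only; most users configure XPS from
 * userspace via /sys/class/net/<dev>/queues/tx-<n>/xps_cpus instead.
 *
 *	cpumask_var_t xps_mask;
 *
 *	if (!zalloc_cpumask_var(&xps_mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_set_cpu(0, xps_mask);
 *	cpumask_set_cpu(1, xps_mask);
 *	err = netif_set_xps_queue(dev, xps_mask, 0);
 *	free_cpumask_var(xps_mask);
 */
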
void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);

int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);

int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);

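/*
 * Example (editorial sketch): carving four TX queues into two traffic
 * classes, TC0 on queues 0-1 and TC1 on queues 2-3.  A real driver would
 * size this from its hardware TC support; set num_tc before the per-tc
 * queue ranges, since netdev_set_tc_queue() rejects tc >= num_tc.
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 2, 0);	// TC0: count 2, offset 0
 *	netdev_set_tc_queue(dev, 1, 2, 2);	// TC1: count 2, offset 2
 */
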
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_SYSFS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

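/*
 * Example (editorial sketch): resizing a device to "channels" combined
 * queues, as an ethtool ->set_channels() handler might.  Caller holds
 * RTNL; "channels" is illustrative.
 *
 *	err = netif_set_real_num_tx_queues(dev, channels);
 *	if (err)
 *		return err;
 *	err = netif_set_real_num_rx_queues(dev, channels);
 *	if (err)
 *		return err;
 */
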
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return is_kdump_kernel() ?
		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);

Eric Dumazete6247022013-12-05 04:45:08 -08002480void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2481{
2482 unsigned long flags;
2483
Myungho Jung98998862017-04-25 11:58:15 -07002484 if (unlikely(!skb))
2485 return;
2486
Eric Dumazete6247022013-12-05 04:45:08 -08002487 if (likely(atomic_read(&skb->users) == 1)) {
2488 smp_rmb();
2489 atomic_set(&skb->users, 0);
2490 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2491 return;
2492 }
2493 get_kfree_skb_cb(skb)->reason = reason;
2494 local_irq_save(flags);
2495 skb->next = __this_cpu_read(softnet_data.completion_queue);
2496 __this_cpu_write(softnet_data.completion_queue, skb);
2497 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2498 local_irq_restore(flags);
2499}
2500EXPORT_SYMBOL(__dev_kfree_skb_irq);
2501
2502void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
Denis Vlasenko56079432006-03-29 15:57:29 -08002503{
2504 if (in_irq() || irqs_disabled())
Eric Dumazete6247022013-12-05 04:45:08 -08002505 __dev_kfree_skb_irq(skb, reason);
Denis Vlasenko56079432006-03-29 15:57:29 -08002506 else
2507 dev_kfree_skb(skb);
2508}
Eric Dumazete6247022013-12-05 04:45:08 -08002509EXPORT_SYMBOL(__dev_kfree_skb_any);
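
/* Illustrative sketch, not part of this file: a TX completion handler that
 * may run in hard IRQ context would typically free skbs through the
 * dev_kfree_skb_any()/dev_consume_skb_any() wrappers around the helper
 * above, so the context check is done for it. my_ring, my_tx_desc and
 * my_next_done_desc() are assumptions made up for the example.
 *
 *	static void my_clean_tx_ring(struct my_ring *ring)
 *	{
 *		struct my_tx_desc *desc;
 *
 *		while ((desc = my_next_done_desc(ring)) != NULL) {
 *			if (desc->error)
 *				dev_kfree_skb_any(desc->skb);
 *			else
 *				dev_consume_skb_any(desc->skb);
 *			desc->skb = NULL;
 *		}
 *	}
 *
 * dev_kfree_skb_any() records the free as a drop for tracing purposes,
 * while dev_consume_skb_any() records a normal completion.
 */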
Denis Vlasenko56079432006-03-29 15:57:29 -08002510
2511
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002512/**
2513 * netif_device_detach - mark device as removed
2514 * @dev: network device
2515 *
2516 * Mark the device as removed from the system and therefore no longer available.
2517 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002518void netif_device_detach(struct net_device *dev)
2519{
2520 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2521 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002522 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002523 }
2524}
2525EXPORT_SYMBOL(netif_device_detach);
2526
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002527/**
2528 * netif_device_attach - mark device as attached
2529 * @dev: network device
2530 *
2531 * Mark the device as attached to the system and restart it if needed.
2532 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002533void netif_device_attach(struct net_device *dev)
2534{
2535 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2536 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002537 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002538 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002539 }
2540}
2541EXPORT_SYMBOL(netif_device_attach);
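
/* Illustrative sketch, not part of this file: the usual pairing of the two
 * helpers above in a driver's power-management hooks. my_priv and the
 * my_hw_*() calls are assumptions made up for the example.
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		struct my_priv *p = dev_get_drvdata(d);
 *
 *		netif_device_detach(p->netdev);
 *		my_hw_power_down(p);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		struct my_priv *p = dev_get_drvdata(d);
 *
 *		my_hw_power_up(p);
 *		netif_device_attach(p->netdev);
 *		return 0;
 *	}
 */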
2542
Jiri Pirko5605c762015-05-12 14:56:12 +02002543/*
2544 * Returns a Tx hash based on the given packet descriptor and the number
2545 * of Tx queues to be used as a distribution range.
2546 */
2547u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2548 unsigned int num_tx_queues)
2549{
2550 u32 hash;
2551 u16 qoffset = 0;
2552 u16 qcount = num_tx_queues;
2553
2554 if (skb_rx_queue_recorded(skb)) {
2555 hash = skb_get_rx_queue(skb);
2556 while (unlikely(hash >= num_tx_queues))
2557 hash -= num_tx_queues;
2558 return hash;
2559 }
2560
2561 if (dev->num_tc) {
2562 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
tchardingf4563a72017-02-09 17:56:07 +11002563
Jiri Pirko5605c762015-05-12 14:56:12 +02002564 qoffset = dev->tc_to_txq[tc].offset;
2565 qcount = dev->tc_to_txq[tc].count;
2566 }
2567
2568 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2569}
2570EXPORT_SYMBOL(__skb_tx_hash);
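
/* Worked example for the reciprocal_scale() step above, assuming the
 * mainline definition reciprocal_scale(val, n) == ((u64)val * n) >> 32:
 * with skb_get_hash() == 0x80000000 and qcount == 8, the result is
 * (0x80000000ULL * 8) >> 32 == 4. The hash space is thus divided evenly
 * across the qcount queues without a modulo operation, and qoffset then
 * shifts the result into the traffic class's queue range.
 */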
2571
Ben Hutchings36c92472012-01-17 07:57:56 +00002572static void skb_warn_bad_offload(const struct sk_buff *skb)
2573{
Wei Tang84d15ae2016-06-16 21:17:49 +08002574 static const netdev_features_t null_features;
Ben Hutchings36c92472012-01-17 07:57:56 +00002575 struct net_device *dev = skb->dev;
Bjørn Mork88ad4172015-11-16 19:16:40 +01002576 const char *name = "";
Ben Hutchings36c92472012-01-17 07:57:56 +00002577
Ben Greearc846ad92013-04-19 10:45:52 +00002578 if (!net_ratelimit())
2579 return;
2580
Bjørn Mork88ad4172015-11-16 19:16:40 +01002581 if (dev) {
2582 if (dev->dev.parent)
2583 name = dev_driver_string(dev->dev.parent);
2584 else
2585 name = netdev_name(dev);
2586 }
Ben Hutchings36c92472012-01-17 07:57:56 +00002587 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2588 "gso_type=%d ip_summed=%d\n",
Bjørn Mork88ad4172015-11-16 19:16:40 +01002589 name, dev ? &dev->features : &null_features,
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002590 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002591 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2592 skb_shinfo(skb)->gso_type, skb->ip_summed);
2593}
2594
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595/*
2596 * Invalidate hardware checksum when packet is to be mangled, and
2597 * complete checksum manually on outgoing path.
2598 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002599int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600{
Al Virod3bc23e2006-11-14 21:24:49 -08002601 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002602 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603
Patrick McHardy84fa7932006-08-29 16:44:56 -07002604 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002605 goto out_set_summed;
2606
2607 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002608 skb_warn_bad_offload(skb);
2609 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 }
2611
Eric Dumazetcef401d2013-01-25 20:34:37 +00002612 /* Before computing a checksum, we should make sure no frag could
2613 * be modified by an external entity: the checksum could be wrong.
2614 */
2615 if (skb_has_shared_frag(skb)) {
2616 ret = __skb_linearize(skb);
2617 if (ret)
2618 goto out;
2619 }
2620
Michał Mirosław55508d62010-12-14 15:24:08 +00002621 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002622 BUG_ON(offset >= skb_headlen(skb));
2623 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2624
2625 offset += skb->csum_offset;
2626 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2627
2628 if (skb_cloned(skb) &&
2629 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2631 if (ret)
2632 goto out;
2633 }
2634
Eric Dumazet4f2e4ad2016-10-29 11:02:36 -07002635 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
Herbert Xua430a432006-07-08 13:34:56 -07002636out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002638out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 return ret;
2640}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002641EXPORT_SYMBOL(skb_checksum_help);
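
/* Worked example for the csum_fold() step above: folding collapses the
 * 32-bit partial sum into 16 bits by adding the two halves with
 * end-around carry and complementing. Assuming csum == 0x1a2b3c4d:
 * 0x1a2b + 0x3c4d == 0x5678, and ~0x5678 == 0xa987 is stored. If the
 * fold yields 0, CSUM_MANGLED_0 (0xffff) is stored instead, since an
 * on-the-wire UDP checksum of zero means "no checksum computed".
 */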
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642
Davide Carattib72b5bf2017-05-18 15:44:38 +02002643int skb_crc32c_csum_help(struct sk_buff *skb)
2644{
2645 __le32 crc32c_csum;
2646 int ret = 0, offset, start;
2647
2648 if (skb->ip_summed != CHECKSUM_PARTIAL)
2649 goto out;
2650
2651 if (unlikely(skb_is_gso(skb)))
2652 goto out;
2653
2654 /* Before computing a checksum, we should make sure no frag could
2655 * be modified by an external entity: the checksum could be wrong.
2656 */
2657 if (unlikely(skb_has_shared_frag(skb))) {
2658 ret = __skb_linearize(skb);
2659 if (ret)
2660 goto out;
2661 }
2662 start = skb_checksum_start_offset(skb);
2663 offset = start + offsetof(struct sctphdr, checksum);
2664 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
2665 ret = -EINVAL;
2666 goto out;
2667 }
2668 if (skb_cloned(skb) &&
2669 !skb_clone_writable(skb, offset + sizeof(__le32))) {
2670 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2671 if (ret)
2672 goto out;
2673 }
2674 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
2675 skb->len - start, ~(__u32)0,
2676 crc32c_csum_stub));
2677 *(__le32 *)(skb->data + offset) = crc32c_csum;
2678 skb->ip_summed = CHECKSUM_NONE;
Davide Carattidba00302017-05-18 15:44:40 +02002679 skb->csum_not_inet = 0;
Davide Carattib72b5bf2017-05-18 15:44:38 +02002680out:
2681 return ret;
2682}
2683
Vlad Yasevich53d64712014-03-27 17:26:18 -04002684__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002685{
2686 __be16 type = skb->protocol;
2687
Pravin B Shelar19acc322013-05-07 20:41:07 +00002688 /* Tunnel gso handlers can set protocol to ethernet. */
2689 if (type == htons(ETH_P_TEB)) {
2690 struct ethhdr *eth;
2691
2692 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2693 return 0;
2694
2695 eth = (struct ethhdr *)skb_mac_header(skb);
2696 type = eth->h_proto;
2697 }
2698
Toshiaki Makitad4bcef32015-01-29 20:37:07 +09002699 return __vlan_get_protocol(skb, type, depth);
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002700}
2701
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002702/**
2703 * skb_mac_gso_segment - mac layer segmentation handler.
2704 * @skb: buffer to segment
2705 * @features: features for the output path (see dev->features)
2706 */
2707struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2708 netdev_features_t features)
2709{
2710 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2711 struct packet_offload *ptype;
Vlad Yasevich53d64712014-03-27 17:26:18 -04002712 int vlan_depth = skb->mac_len;
2713 __be16 type = skb_network_protocol(skb, &vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002714
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002715 if (unlikely(!type))
2716 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002717
Vlad Yasevich53d64712014-03-27 17:26:18 -04002718 __skb_pull(skb, vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002719
2720 rcu_read_lock();
2721 list_for_each_entry_rcu(ptype, &offload_base, list) {
2722 if (ptype->type == type && ptype->callbacks.gso_segment) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002723 segs = ptype->callbacks.gso_segment(skb, features);
2724 break;
2725 }
2726 }
2727 rcu_read_unlock();
2728
2729 __skb_push(skb, skb->data - skb_mac_header(skb));
2730
2731 return segs;
2732}
2733EXPORT_SYMBOL(skb_mac_gso_segment);
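
/* Illustrative sketch, not part of this file: the offload list walked above
 * is populated with dev_add_offload(). A protocol would hook in roughly as
 * follows; my_proto_gso_segment() and ETH_P_MYPROTO are assumptions made up
 * for the example (ETH_P_IP with inet_gso_segment() is the real-world
 * analogue).
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_MYPROTO),
 *		.callbacks = {
 *			.gso_segment = my_proto_gso_segment,
 *		},
 *	};
 *
 *	static int __init my_offload_init(void)
 *	{
 *		dev_add_offload(&my_offload);
 *		return 0;
 *	}
 */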
2734
2735
Cong Wang12b00042013-02-05 16:36:38 +00002736/* openvswitch calls this on the rx path, so we need a different check.
2737 */
2738static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2739{
2740 if (tx_path)
Eric Dumazet6e7bc472017-02-03 14:29:42 -08002741 return skb->ip_summed != CHECKSUM_PARTIAL &&
2742 skb->ip_summed != CHECKSUM_NONE;
2743
2744 return skb->ip_summed == CHECKSUM_NONE;
Cong Wang12b00042013-02-05 16:36:38 +00002745}
2746
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002747/**
Cong Wang12b00042013-02-05 16:36:38 +00002748 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002749 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002750 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002751 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002752 *
2753 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002754 *
2755 * It may return NULL if the skb requires no segmentation. This is
2756 * only possible when GSO is used for verifying header integrity.
Konstantin Khlebnikov9207f9d2016-01-08 15:21:46 +03002757 *
2758 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002759 */
Cong Wang12b00042013-02-05 16:36:38 +00002760struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2761 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002762{
Eric Dumazetb2504a52017-01-31 10:20:32 -08002763 struct sk_buff *segs;
2764
Cong Wang12b00042013-02-05 16:36:38 +00002765 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002766 int err;
2767
Eric Dumazetb2504a52017-01-31 10:20:32 -08002768 /* We're going to init ->check field in TCP or UDP header */
françois romieua40e0a62014-07-15 23:55:35 +02002769 err = skb_cow_head(skb, 0);
2770 if (err < 0)
Herbert Xua430a432006-07-08 13:34:56 -07002771 return ERR_PTR(err);
2772 }
2773
Alexander Duyck802ab552016-04-10 21:45:03 -04002774 /* Only report GSO partial support if it will enable us to
2775 * support segmentation on this frame without needing additional
2776 * work.
2777 */
2778 if (features & NETIF_F_GSO_PARTIAL) {
2779 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
2780 struct net_device *dev = skb->dev;
2781
2782 partial_features |= dev->features & dev->gso_partial_features;
2783 if (!skb_gso_ok(skb, features | partial_features))
2784 features &= ~NETIF_F_GSO_PARTIAL;
2785 }
2786
Konstantin Khlebnikov9207f9d2016-01-08 15:21:46 +03002787 BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2788 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2789
Pravin B Shelar68c33162013-02-14 14:02:41 +00002790 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Eric Dumazet3347c962013-10-19 11:42:56 -07002791 SKB_GSO_CB(skb)->encap_level = 0;
2792
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002793 skb_reset_mac_header(skb);
2794 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002795
Eric Dumazetb2504a52017-01-31 10:20:32 -08002796 segs = skb_mac_gso_segment(skb, features);
2797
2798 if (unlikely(skb_needs_check(skb, tx_path)))
2799 skb_warn_bad_offload(skb);
2800
2801 return segs;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002802}
Cong Wang12b00042013-02-05 16:36:38 +00002803EXPORT_SYMBOL(__skb_gso_segment);
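
/* Illustrative sketch, not part of this file: the common caller pattern
 * (compare validate_xmit_skb() below) via the skb_gso_segment() wrapper,
 * which is __skb_gso_segment(skb, features, true):
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);
 *		skb = segs;
 *	}
 *
 * A NULL return means the skb needed no segmentation and may be sent
 * as-is.
 */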
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002804
Herbert Xufb286bb2005-11-10 13:01:24 -08002805/* Take action when hardware reception checksum errors are detected. */
2806#ifdef CONFIG_BUG
2807void netdev_rx_csum_fault(struct net_device *dev)
2808{
2809 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002810 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002811 dump_stack();
2812 }
2813}
2814EXPORT_SYMBOL(netdev_rx_csum_fault);
2815#endif
2816
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817/* Actually, we should eliminate this check as soon as we know that:
2818 * 1. An IOMMU is present and can map all the memory.
2819 * 2. No high memory really exists on this machine.
2820 */
2821
Florian Westphalc1e756b2014-05-05 15:00:44 +02002822static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002824#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 int i;
tchardingf4563a72017-02-09 17:56:07 +11002826
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002827 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002828 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2829 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tchardingf4563a72017-02-09 17:56:07 +11002830
Ian Campbellea2ab692011-08-22 23:44:58 +00002831 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002832 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002833 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002834 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002836 if (PCI_DMA_BUS_IS_PHYS) {
2837 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838
Eric Dumazet9092c652010-04-02 13:34:49 -07002839 if (!pdev)
2840 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002841 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002842 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2843 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
tchardingf4563a72017-02-09 17:56:07 +11002844
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002845 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2846 return 1;
2847 }
2848 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002849#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850 return 0;
2851}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852
Simon Horman3b392dd2014-06-04 08:53:17 +09002853/* If MPLS offload request, verify we are testing hardware MPLS features
2854 * instead of standard features for the netdev.
2855 */
Pravin B Shelard0edc7b2014-12-23 16:20:11 -08002856#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
Simon Horman3b392dd2014-06-04 08:53:17 +09002857static netdev_features_t net_mpls_features(struct sk_buff *skb,
2858 netdev_features_t features,
2859 __be16 type)
2860{
Simon Horman25cd9ba2014-10-06 05:05:13 -07002861 if (eth_p_mpls(type))
Simon Horman3b392dd2014-06-04 08:53:17 +09002862 features &= skb->dev->mpls_features;
2863
2864 return features;
2865}
2866#else
2867static netdev_features_t net_mpls_features(struct sk_buff *skb,
2868 netdev_features_t features,
2869 __be16 type)
2870{
2871 return features;
2872}
2873#endif
2874
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002875static netdev_features_t harmonize_features(struct sk_buff *skb,
Florian Westphalc1e756b2014-05-05 15:00:44 +02002876 netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002877{
Vlad Yasevich53d64712014-03-27 17:26:18 -04002878 int tmp;
Simon Horman3b392dd2014-06-04 08:53:17 +09002879 __be16 type;
2880
2881 type = skb_network_protocol(skb, &tmp);
2882 features = net_mpls_features(skb, features, type);
Vlad Yasevich53d64712014-03-27 17:26:18 -04002883
Ed Cashinc0d680e2012-09-19 15:49:00 +00002884 if (skb->ip_summed != CHECKSUM_NONE &&
Simon Horman3b392dd2014-06-04 08:53:17 +09002885 !can_checksum_protocol(features, type)) {
Alexander Duyck996e8022016-05-02 09:25:10 -07002886 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Jesse Grossf01a5232011-01-09 06:23:31 +00002887 }
Eric Dumazet7be2c822017-01-18 12:12:17 -08002888 if (illegal_highdma(skb->dev, skb))
2889 features &= ~NETIF_F_SG;
Jesse Grossf01a5232011-01-09 06:23:31 +00002890
2891 return features;
2892}
2893
Toshiaki Makitae38f3022015-03-27 14:31:13 +09002894netdev_features_t passthru_features_check(struct sk_buff *skb,
2895 struct net_device *dev,
2896 netdev_features_t features)
2897{
2898 return features;
2899}
2900EXPORT_SYMBOL(passthru_features_check);
2901
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002902static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2903 struct net_device *dev,
2904 netdev_features_t features)
2905{
2906 return vlan_features_check(skb, features);
2907}
2908
Alexander Duyckcbc53e02016-04-10 21:44:51 -04002909static netdev_features_t gso_features_check(const struct sk_buff *skb,
2910 struct net_device *dev,
2911 netdev_features_t features)
2912{
2913 u16 gso_segs = skb_shinfo(skb)->gso_segs;
2914
2915 if (gso_segs > dev->gso_max_segs)
2916 return features & ~NETIF_F_GSO_MASK;
2917
Alexander Duyck802ab552016-04-10 21:45:03 -04002918 /* Support for GSO partial features requires software
2919 * intervention before we can actually process the packets
2920 * so we need to strip support for any partial features now
2921 * and we can pull them back in after we have partially
2922 * segmented the frame.
2923 */
2924 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
2925 features &= ~dev->gso_partial_features;
2926
2927 /* Make sure to clear the IPv4 ID mangling feature if the
2928 * IPv4 header has the potential to be fragmented.
Alexander Duyckcbc53e02016-04-10 21:44:51 -04002929 */
2930 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
2931 struct iphdr *iph = skb->encapsulation ?
2932 inner_ip_hdr(skb) : ip_hdr(skb);
2933
2934 if (!(iph->frag_off & htons(IP_DF)))
2935 features &= ~NETIF_F_TSO_MANGLEID;
2936 }
2937
2938 return features;
2939}
2940
Florian Westphalc1e756b2014-05-05 15:00:44 +02002941netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002942{
Jesse Gross5f352272014-12-23 22:37:26 -08002943 struct net_device *dev = skb->dev;
Eric Dumazetfcbeb972014-10-05 10:11:27 -07002944 netdev_features_t features = dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002945
Alexander Duyckcbc53e02016-04-10 21:44:51 -04002946 if (skb_is_gso(skb))
2947 features = gso_features_check(skb, dev, features);
Ben Hutchings30b678d2012-07-30 15:57:00 +00002948
Jesse Gross5f352272014-12-23 22:37:26 -08002949 /* If encapsulation offload request, verify we are testing
2950 * hardware encapsulation features instead of standard
2951 * features for the netdev
2952 */
2953 if (skb->encapsulation)
2954 features &= dev->hw_enc_features;
2955
Toshiaki Makitaf5a7fb82015-03-27 14:31:11 +09002956 if (skb_vlan_tagged(skb))
2957 features = netdev_intersect_features(features,
2958 dev->vlan_features |
2959 NETIF_F_HW_VLAN_CTAG_TX |
2960 NETIF_F_HW_VLAN_STAG_TX);
Jesse Gross58e998c2010-10-29 12:14:55 +00002961
Jesse Gross5f352272014-12-23 22:37:26 -08002962 if (dev->netdev_ops->ndo_features_check)
2963 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2964 features);
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002965 else
2966 features &= dflt_features_check(skb, dev, features);
Jesse Gross5f352272014-12-23 22:37:26 -08002967
Florian Westphalc1e756b2014-05-05 15:00:44 +02002968 return harmonize_features(skb, features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002969}
Florian Westphalc1e756b2014-05-05 15:00:44 +02002970EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002971
David S. Miller2ea25512014-08-29 21:10:01 -07002972static int xmit_one(struct sk_buff *skb, struct net_device *dev,
David S. Miller95f6b3d2014-08-29 21:57:30 -07002973 struct netdev_queue *txq, bool more)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002974{
David S. Miller2ea25512014-08-29 21:10:01 -07002975 unsigned int len;
2976 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08002977
Salam Noureddine7866a622015-01-27 11:35:48 -08002978 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
David S. Miller2ea25512014-08-29 21:10:01 -07002979 dev_queue_xmit_nit(skb, dev);
Jesse Grossfc741212011-01-09 06:23:32 +00002980
David S. Miller2ea25512014-08-29 21:10:01 -07002981 len = skb->len;
2982 trace_net_dev_start_xmit(skb, dev);
David S. Miller95f6b3d2014-08-29 21:57:30 -07002983 rc = netdev_start_xmit(skb, dev, txq, more);
David S. Miller2ea25512014-08-29 21:10:01 -07002984 trace_net_dev_xmit(skb, rc, dev, len);
Eric Dumazetadf30902009-06-02 05:19:30 +00002985
Patrick McHardy572a9d72009-11-10 06:14:14 +00002986 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002987}
David S. Miller2ea25512014-08-29 21:10:01 -07002988
David S. Miller8dcda222014-09-01 15:06:40 -07002989struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2990 struct netdev_queue *txq, int *ret)
David S. Miller7f2e8702014-08-29 21:19:14 -07002991{
2992 struct sk_buff *skb = first;
2993 int rc = NETDEV_TX_OK;
2994
2995 while (skb) {
2996 struct sk_buff *next = skb->next;
2997
2998 skb->next = NULL;
David S. Miller95f6b3d2014-08-29 21:57:30 -07002999 rc = xmit_one(skb, dev, txq, next != NULL);
David S. Miller7f2e8702014-08-29 21:19:14 -07003000 if (unlikely(!dev_xmit_complete(rc))) {
3001 skb->next = next;
3002 goto out;
3003 }
3004
3005 skb = next;
3006 if (netif_xmit_stopped(txq) && skb) {
3007 rc = NETDEV_TX_BUSY;
3008 break;
3009 }
3010 }
3011
3012out:
3013 *ret = rc;
3014 return skb;
3015}
3016
Eric Dumazet1ff0dc92014-10-06 11:26:27 -07003017static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3018 netdev_features_t features)
David S. Millereae3f882014-08-30 15:17:13 -07003019{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01003020 if (skb_vlan_tag_present(skb) &&
Jiri Pirko59682502014-11-19 14:04:59 +01003021 !vlan_hw_offload_capable(features, skb->vlan_proto))
3022 skb = __vlan_hwaccel_push_inside(skb);
David S. Millereae3f882014-08-30 15:17:13 -07003023 return skb;
3024}
3025
Davide Caratti43c26a12017-05-18 15:44:41 +02003026int skb_csum_hwoffload_help(struct sk_buff *skb,
3027 const netdev_features_t features)
3028{
3029 if (unlikely(skb->csum_not_inet))
3030 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3031 skb_crc32c_csum_help(skb);
3032
3033 return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3034}
3035EXPORT_SYMBOL(skb_csum_hwoffload_help);
3036
Eric Dumazet55a93b32014-10-03 15:31:07 -07003037static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
David S. Millereae3f882014-08-30 15:17:13 -07003038{
3039 netdev_features_t features;
3040
David S. Millereae3f882014-08-30 15:17:13 -07003041 features = netif_skb_features(skb);
3042 skb = validate_xmit_vlan(skb, features);
3043 if (unlikely(!skb))
3044 goto out_null;
3045
Johannes Berg8b86a612015-04-17 15:45:04 +02003046 if (netif_needs_gso(skb, features)) {
David S. Millerce937182014-08-30 19:22:20 -07003047 struct sk_buff *segs;
3048
3049 segs = skb_gso_segment(skb, features);
Jason Wangcecda692014-09-19 16:04:38 +08003050 if (IS_ERR(segs)) {
Jason Wangaf6dabc2014-12-19 11:09:13 +08003051 goto out_kfree_skb;
Jason Wangcecda692014-09-19 16:04:38 +08003052 } else if (segs) {
3053 consume_skb(skb);
3054 skb = segs;
3055 }
David S. Millereae3f882014-08-30 15:17:13 -07003056 } else {
3057 if (skb_needs_linearize(skb, features) &&
3058 __skb_linearize(skb))
3059 goto out_kfree_skb;
3060
Steffen Klassertf6e27112017-04-14 10:07:28 +02003061 if (validate_xmit_xfrm(skb, features))
3062 goto out_kfree_skb;
3063
David S. Millereae3f882014-08-30 15:17:13 -07003064 /* If packet is not checksummed and device does not
3065 * support checksumming for this protocol, complete
3066 * checksumming here.
3067 */
3068 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3069 if (skb->encapsulation)
3070 skb_set_inner_transport_header(skb,
3071 skb_checksum_start_offset(skb));
3072 else
3073 skb_set_transport_header(skb,
3074 skb_checksum_start_offset(skb));
Davide Caratti43c26a12017-05-18 15:44:41 +02003075 if (skb_csum_hwoffload_help(skb, features))
David S. Millereae3f882014-08-30 15:17:13 -07003076 goto out_kfree_skb;
3077 }
3078 }
3079
3080 return skb;
3081
3082out_kfree_skb:
3083 kfree_skb(skb);
3084out_null:
Eric Dumazetd21fd632016-04-12 21:50:07 -07003085 atomic_long_inc(&dev->tx_dropped);
David S. Millereae3f882014-08-30 15:17:13 -07003086 return NULL;
3087}
3088
Eric Dumazet55a93b32014-10-03 15:31:07 -07003089struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
3090{
3091 struct sk_buff *next, *head = NULL, *tail;
3092
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07003093 for (; skb != NULL; skb = next) {
Eric Dumazet55a93b32014-10-03 15:31:07 -07003094 next = skb->next;
3095 skb->next = NULL;
Eric Dumazet55a93b32014-10-03 15:31:07 -07003096
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07003097 /* in case skb won't be segmented, skb->prev points to itself */
3098 skb->prev = skb;
3099
3100 skb = validate_xmit_skb(skb, dev);
3101 if (!skb)
3102 continue;
3103
3104 if (!head)
3105 head = skb;
3106 else
3107 tail->next = skb;
3108 /* If skb was segmented, skb->prev points to
3109 * the last segment. If not, it still contains skb.
3110 */
3111 tail = skb->prev;
Eric Dumazet55a93b32014-10-03 15:31:07 -07003112 }
3113 return head;
3114}
Willem de Bruijn104ba782016-10-26 11:23:07 -04003115EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003116
Eric Dumazet1def9232013-01-10 12:36:42 +00003117static void qdisc_pkt_len_init(struct sk_buff *skb)
3118{
3119 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3120
3121 qdisc_skb_cb(skb)->pkt_len = skb->len;
3122
3123 /* To get a more precise estimate of the bytes sent on the wire,
3124 * we add to pkt_len the header size of every segment after the first
3125 */
3126 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08003127 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00003128 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00003129
Eric Dumazet757b8b12013-01-15 21:14:21 -08003130 /* mac layer + network layer */
3131 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3132
3133 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00003134 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3135 hdr_len += tcp_hdrlen(skb);
3136 else
3137 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00003138
3139 if (shinfo->gso_type & SKB_GSO_DODGY)
3140 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3141 shinfo->gso_size);
3142
3143 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00003144 }
3145}
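
/* Worked example for the GSO branch above, with made-up numbers: a TCP
 * skb of len 65226 carrying 45 segments of gso_size 1448 behind a 66-byte
 * header stack (14 Ethernet + 20 IPv4 + 32 TCP) gets
 * pkt_len = 65226 + (45 - 1) * 66 = 68130, approximating the bytes that
 * will actually hit the wire once the device or the software GSO layer
 * has replicated the headers onto every segment.
 */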
3146
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003147static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3148 struct net_device *dev,
3149 struct netdev_queue *txq)
3150{
3151 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazet520ac302016-06-21 23:16:49 -07003152 struct sk_buff *to_free = NULL;
Eric Dumazeta2da5702011-01-20 03:48:19 +00003153 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003154 int rc;
3155
Eric Dumazeta2da5702011-01-20 03:48:19 +00003156 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003157 /*
3158 * Heuristic to force contended enqueues to serialize on a
3159 * separate lock before trying to get the qdisc main lock.
Eric Dumazetf9eb8ae2016-06-06 09:37:15 -07003160 * This permits the qdisc->running owner to get the lock more
Ying Xue9bf2b8c2014-06-26 15:56:31 +08003161 * often and dequeue packets faster.
Eric Dumazet79640a42010-06-02 05:09:29 -07003162 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00003163 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003164 if (unlikely(contended))
3165 spin_lock(&q->busylock);
3166
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003167 spin_lock(root_lock);
3168 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
Eric Dumazet520ac302016-06-21 23:16:49 -07003169 __qdisc_drop(skb, &to_free);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003170 rc = NET_XMIT_DROP;
3171 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07003172 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003173 /*
3174 * This is a work-conserving queue; there are no old skbs
3175 * waiting to be sent out; and the qdisc is not running -
3176 * xmit the skb directly.
3177 */
Eric Dumazetbfe0d022011-01-09 08:30:54 +00003178
Eric Dumazetbfe0d022011-01-09 08:30:54 +00003179 qdisc_bstats_update(q, skb);
3180
Eric Dumazet55a93b32014-10-03 15:31:07 -07003181 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
Eric Dumazet79640a42010-06-02 05:09:29 -07003182 if (unlikely(contended)) {
3183 spin_unlock(&q->busylock);
3184 contended = false;
3185 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003186 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003187 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07003188 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003189
3190 rc = NET_XMIT_SUCCESS;
3191 } else {
Eric Dumazet520ac302016-06-21 23:16:49 -07003192 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07003193 if (qdisc_run_begin(q)) {
3194 if (unlikely(contended)) {
3195 spin_unlock(&q->busylock);
3196 contended = false;
3197 }
3198 __qdisc_run(q);
3199 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003200 }
3201 spin_unlock(root_lock);
Eric Dumazet520ac302016-06-21 23:16:49 -07003202 if (unlikely(to_free))
3203 kfree_skb_list(to_free);
Eric Dumazet79640a42010-06-02 05:09:29 -07003204 if (unlikely(contended))
3205 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003206 return rc;
3207}
3208
Daniel Borkmann86f85152013-12-29 17:27:11 +01003209#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Neil Horman5bc14212011-11-22 05:10:51 +00003210static void skb_update_prio(struct sk_buff *skb)
3211{
Igor Maravic6977a792011-11-25 07:44:54 +00003212 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00003213
Eric Dumazet91c68ce2012-07-08 21:45:10 +00003214 if (!skb->priority && skb->sk && map) {
Tejun Heo2a56a1f2015-12-07 17:38:52 -05003215 unsigned int prioidx =
3216 sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
Eric Dumazet91c68ce2012-07-08 21:45:10 +00003217
3218 if (prioidx < map->priomap_len)
3219 skb->priority = map->priomap[prioidx];
3220 }
Neil Horman5bc14212011-11-22 05:10:51 +00003221}
3222#else
3223#define skb_update_prio(skb)
3224#endif
3225
hannes@stressinduktion.orgf60e5992015-04-01 17:07:44 +02003226DEFINE_PER_CPU(int, xmit_recursion);
3227EXPORT_SYMBOL(xmit_recursion);
3228
Dave Jonesd29f7492008-07-22 14:09:06 -07003229/**
Michel Machado95603e22012-06-12 10:16:35 +00003230 * dev_loopback_xmit - loop back @skb
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05003231 * @net: network namespace this loopback is happening in
3232 * @sk: the sock; present so this function can be used as a netfilter okfn
Michel Machado95603e22012-06-12 10:16:35 +00003233 * @skb: buffer to transmit
3234 */
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05003235int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
Michel Machado95603e22012-06-12 10:16:35 +00003236{
3237 skb_reset_mac_header(skb);
3238 __skb_pull(skb, skb_network_offset(skb));
3239 skb->pkt_type = PACKET_LOOPBACK;
3240 skb->ip_summed = CHECKSUM_UNNECESSARY;
3241 WARN_ON(!skb_dst(skb));
3242 skb_dst_force(skb);
3243 netif_rx_ni(skb);
3244 return 0;
3245}
3246EXPORT_SYMBOL(dev_loopback_xmit);
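
/* Illustrative sketch, not part of this file: this helper is shaped to be
 * used as the okfn of an NF_HOOK() invocation, which is how IPv4 loops a
 * copy of a multicast skb back to local sockets, roughly:
 *
 *	NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 *		net, sk, newskb, NULL, newskb->dev,
 *		dev_loopback_xmit);
 *
 * (compare ip_mc_output(); the net and sk arguments exist so the
 * signature matches the okfn type NF_HOOK() expects.)
 */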
3247
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003248#ifdef CONFIG_NET_EGRESS
3249static struct sk_buff *
3250sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3251{
3252 struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
3253 struct tcf_result cl_res;
3254
3255 if (!cl)
3256 return skb;
3257
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05003258 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003259 qdisc_bstats_cpu_update(cl->q, skb);
3260
Jiri Pirko87d83092017-05-17 11:07:54 +02003261 switch (tcf_classify(skb, cl, &cl_res, false)) {
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003262 case TC_ACT_OK:
3263 case TC_ACT_RECLASSIFY:
3264 skb->tc_index = TC_H_MIN(cl_res.classid);
3265 break;
3266 case TC_ACT_SHOT:
3267 qdisc_qstats_cpu_drop(cl->q);
3268 *ret = NET_XMIT_DROP;
Daniel Borkmann7e2c3ae2016-05-15 23:28:29 +02003269 kfree_skb(skb);
3270 return NULL;
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003271 case TC_ACT_STOLEN:
3272 case TC_ACT_QUEUED:
Jiri Pirkoe25ea212017-06-06 14:12:02 +02003273 case TC_ACT_TRAP:
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003274 *ret = NET_XMIT_SUCCESS;
Daniel Borkmann7e2c3ae2016-05-15 23:28:29 +02003275 consume_skb(skb);
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003276 return NULL;
3277 case TC_ACT_REDIRECT:
3278 /* No need to push/pop skb's mac_header here on egress! */
3279 skb_do_redirect(skb);
3280 *ret = NET_XMIT_SUCCESS;
3281 return NULL;
3282 default:
3283 break;
3284 }
3285
3286 return skb;
3287}
3288#endif /* CONFIG_NET_EGRESS */
3289
Jiri Pirko638b2a62015-05-12 14:56:13 +02003290static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3291{
3292#ifdef CONFIG_XPS
3293 struct xps_dev_maps *dev_maps;
3294 struct xps_map *map;
3295 int queue_index = -1;
3296
3297 rcu_read_lock();
3298 dev_maps = rcu_dereference(dev->xps_maps);
3299 if (dev_maps) {
Alexander Duyck184c4492016-10-28 11:50:13 -04003300 unsigned int tci = skb->sender_cpu - 1;
3301
3302 if (dev->num_tc) {
3303 tci *= dev->num_tc;
3304 tci += netdev_get_prio_tc_map(dev, skb->priority);
3305 }
3306
3307 map = rcu_dereference(dev_maps->cpu_map[tci]);
Jiri Pirko638b2a62015-05-12 14:56:13 +02003308 if (map) {
3309 if (map->len == 1)
3310 queue_index = map->queues[0];
3311 else
3312 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
3313 map->len)];
3314 if (unlikely(queue_index >= dev->real_num_tx_queues))
3315 queue_index = -1;
3316 }
3317 }
3318 rcu_read_unlock();
3319
3320 return queue_index;
3321#else
3322 return -1;
3323#endif
3324}
3325
3326static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3327{
3328 struct sock *sk = skb->sk;
3329 int queue_index = sk_tx_queue_get(sk);
3330
3331 if (queue_index < 0 || skb->ooo_okay ||
3332 queue_index >= dev->real_num_tx_queues) {
3333 int new_index = get_xps_queue(dev, skb);
tchardingf4563a72017-02-09 17:56:07 +11003334
Jiri Pirko638b2a62015-05-12 14:56:13 +02003335 if (new_index < 0)
3336 new_index = skb_tx_hash(dev, skb);
3337
3338 if (queue_index != new_index && sk &&
Eric Dumazet004a5d02015-10-04 21:08:10 -07003339 sk_fullsock(sk) &&
Jiri Pirko638b2a62015-05-12 14:56:13 +02003340 rcu_access_pointer(sk->sk_dst_cache))
3341 sk_tx_queue_set(sk, new_index);
3342
3343 queue_index = new_index;
3344 }
3345
3346 return queue_index;
3347}
3348
3349struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3350 struct sk_buff *skb,
3351 void *accel_priv)
3352{
3353 int queue_index = 0;
3354
3355#ifdef CONFIG_XPS
Eric Dumazet52bd2d62015-11-18 06:30:50 -08003356 u32 sender_cpu = skb->sender_cpu - 1;
3357
3358 if (sender_cpu >= (u32)NR_CPUS)
Jiri Pirko638b2a62015-05-12 14:56:13 +02003359 skb->sender_cpu = raw_smp_processor_id() + 1;
3360#endif
3361
3362 if (dev->real_num_tx_queues != 1) {
3363 const struct net_device_ops *ops = dev->netdev_ops;
tchardingf4563a72017-02-09 17:56:07 +11003364
Jiri Pirko638b2a62015-05-12 14:56:13 +02003365 if (ops->ndo_select_queue)
3366 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3367 __netdev_pick_tx);
3368 else
3369 queue_index = __netdev_pick_tx(dev, skb);
3370
3371 if (!accel_priv)
3372 queue_index = netdev_cap_txqueue(dev, queue_index);
3373 }
3374
3375 skb_set_queue_mapping(skb, queue_index);
3376 return netdev_get_tx_queue(dev, queue_index);
3377}
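
/* Illustrative sketch, not part of this file: a driver overriding queue
 * selection through ndo_select_queue() while keeping __netdev_pick_tx()
 * as the fallback, as wired up above. my_select_queue() and the
 * reserve-the-last-queue-for-control-traffic policy are assumptions made
 * up for the example.
 *
 *	static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
 *				   void *accel_priv,
 *				   select_queue_fallback_t fallback)
 *	{
 *		if (skb->priority == TC_PRIO_CONTROL)
 *			return dev->real_num_tx_queues - 1;
 *		return fallback(dev, skb);
 *	}
 */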
3378
Michel Machado95603e22012-06-12 10:16:35 +00003379/**
Jason Wang9d08dd32014-01-20 11:25:13 +08003380 * __dev_queue_xmit - transmit a buffer
Dave Jonesd29f7492008-07-22 14:09:06 -07003381 * @skb: buffer to transmit
Jason Wang9d08dd32014-01-20 11:25:13 +08003382 * @accel_priv: private data used for L2 forwarding offload
Dave Jonesd29f7492008-07-22 14:09:06 -07003383 *
3384 * Queue a buffer for transmission to a network device. The caller must
3385 * have set the device and priority and built the buffer before calling
3386 * this function. The function can be called from an interrupt.
3387 *
3388 * A negative errno code is returned on a failure. A success does not
3389 * guarantee the frame will be transmitted as it may be dropped due
3390 * to congestion or traffic shaping.
3391 *
3392 * -----------------------------------------------------------------------------------
3393 * I notice this method can also return errors from the queue disciplines,
3394 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3395 * be positive.
3396 *
3397 * Regardless of the return value, the skb is consumed, so it is currently
3398 * difficult to retry a send to this method. (You can bump the ref count
3399 * before sending to hold a reference for retry if you are careful.)
3400 *
3401 * When calling this method, interrupts MUST be enabled. This is because
3402 * the BH enable code must have IRQs enabled so that it will not deadlock.
3403 * --BLG
3404 */
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05303405static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406{
3407 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07003408 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409 struct Qdisc *q;
3410 int rc = -ENOMEM;
3411
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00003412 skb_reset_mac_header(skb);
3413
Willem de Bruijne7fd2882014-08-04 22:11:48 -04003414 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3415 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3416
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003417 /* Disable soft irqs for various locks below. Also
3418 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003420 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421
Neil Horman5bc14212011-11-22 05:10:51 +00003422 skb_update_prio(skb);
3423
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003424 qdisc_pkt_len_init(skb);
3425#ifdef CONFIG_NET_CLS_ACT
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05003426 skb->tc_at_ingress = 0;
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003427# ifdef CONFIG_NET_EGRESS
3428 if (static_key_false(&egress_needed)) {
3429 skb = sch_handle_egress(skb, &rc, dev);
3430 if (!skb)
3431 goto out;
3432 }
3433# endif
3434#endif
Eric Dumazet02875872014-10-05 18:38:35 -07003435 /* If the device/qdisc doesn't need skb->dst, release it right now while
3436 * it's hot in this cpu's cache.
3437 */
3438 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3439 skb_dst_drop(skb);
3440 else
3441 skb_dst_force(skb);
3442
Jason Wangf663dd92014-01-10 16:18:26 +08003443 txq = netdev_pick_tx(dev, skb, accel_priv);
Paul E. McKenneya898def2010-02-22 17:04:49 -08003444 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07003445
Koki Sanagicf66ba52010-08-23 18:45:02 +09003446 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003448 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07003449 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 }
3451
3452 /* The device has no queue. Common case for software devices:
tchardingeb13da12017-02-09 17:56:06 +11003453 * loopback, all sorts of tunnels...
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454
tchardingeb13da12017-02-09 17:56:06 +11003455 * Really, it is unlikely that netif_tx_lock protection is necessary
3456 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
3457 * counters.)
3458 * However, it is possible that they rely on the protection
3459 * made by us here.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003460
tchardingeb13da12017-02-09 17:56:06 +11003461 * Check this and shoot the lock. It is not prone to deadlocks.
3462 * Either shoot the noqueue qdisc, it is even simpler 8)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 */
3464 if (dev->flags & IFF_UP) {
3465 int cpu = smp_processor_id(); /* ok because BHs are off */
3466
David S. Millerc773e842008-07-08 23:13:53 -07003467 if (txq->xmit_lock_owner != cpu) {
Daniel Borkmanna70b5062016-06-10 21:19:06 +02003468 if (unlikely(__this_cpu_read(xmit_recursion) >
3469 XMIT_RECURSION_LIMIT))
Eric Dumazet745e20f2010-09-29 13:23:09 -07003470 goto recursion_alert;
3471
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003472 skb = validate_xmit_skb(skb, dev);
3473 if (!skb)
Eric Dumazetd21fd632016-04-12 21:50:07 -07003474 goto out;
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003475
David S. Millerc773e842008-07-08 23:13:53 -07003476 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477
Tom Herbert734664982011-11-28 16:32:44 +00003478 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07003479 __this_cpu_inc(xmit_recursion);
David S. Millerce937182014-08-30 19:22:20 -07003480 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
Eric Dumazet745e20f2010-09-29 13:23:09 -07003481 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00003482 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07003483 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484 goto out;
3485 }
3486 }
David S. Millerc773e842008-07-08 23:13:53 -07003487 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00003488 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3489 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490 } else {
3491 /* Recursion is detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07003492 * unfortunately
3493 */
3494recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00003495 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3496 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497 }
3498 }
3499
3500 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07003501 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502
Eric Dumazet015f0682014-03-27 08:45:56 -07003503 atomic_long_inc(&dev->tx_dropped);
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003504 kfree_skb_list(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505 return rc;
3506out:
Herbert Xud4828d82006-06-22 02:28:18 -07003507 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508 return rc;
3509}
Jason Wangf663dd92014-01-10 16:18:26 +08003510
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05003511int dev_queue_xmit(struct sk_buff *skb)
Jason Wangf663dd92014-01-10 16:18:26 +08003512{
3513 return __dev_queue_xmit(skb, NULL);
3514}
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05003515EXPORT_SYMBOL(dev_queue_xmit);
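
/* Illustrative sketch, not part of this file: the minimal caller contract
 * for dev_queue_xmit(). build_my_frame() is an assumption made up for the
 * example; the key point, per the kernel-doc above, is that the skb is
 * consumed no matter what the return value says, so it must not be
 * touched after the call.
 *
 *	struct sk_buff *skb = build_my_frame(dev);
 *
 *	if (skb) {
 *		skb->dev = dev;
 *		if (dev_queue_xmit(skb))
 *			pr_debug("%s: frame dropped or rejected\n", dev->name);
 *	}
 */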
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516
Jason Wangf663dd92014-01-10 16:18:26 +08003517int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3518{
3519 return __dev_queue_xmit(skb, accel_priv);
3520}
3521EXPORT_SYMBOL(dev_queue_xmit_accel);
3522
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523
tchardingeb13da12017-02-09 17:56:06 +11003524/*************************************************************************
3525 * Receiver routines
3526 *************************************************************************/
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003528int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00003529EXPORT_SYMBOL(netdev_max_backlog);
3530
Eric Dumazet3b098e22010-05-15 23:57:10 -07003531int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003532int netdev_budget __read_mostly = 300;
Matthew Whitehead7acf8a12017-04-19 12:37:10 -04003533unsigned int __read_mostly netdev_budget_usecs = 2000;
Matthias Tafelmeier3d48b532016-12-29 21:37:21 +01003534int weight_p __read_mostly = 64; /* old backlog weight */
3535int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
3536int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
3537int dev_rx_weight __read_mostly = 64;
3538int dev_tx_weight __read_mostly = 64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003540/* Called with irq disabled */
3541static inline void ____napi_schedule(struct softnet_data *sd,
3542 struct napi_struct *napi)
3543{
3544 list_add_tail(&napi->poll_list, &sd->poll_list);
3545 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3546}
3547
Eric Dumazetdf334542010-03-24 19:13:54 +00003548#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07003549
3550/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003551struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07003552EXPORT_SYMBOL(rps_sock_flow_table);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003553u32 rps_cpu_mask __read_mostly;
3554EXPORT_SYMBOL(rps_cpu_mask);
Tom Herbertfec5e652010-04-16 16:01:27 -07003555
Ingo Molnarc5905af2012-02-24 08:31:31 +01003556struct static_key rps_needed __read_mostly;
Jason Wang3df97ba2016-04-25 23:13:42 -04003557EXPORT_SYMBOL(rps_needed);
Eric Dumazet13bfff22016-12-07 08:29:10 -08003558struct static_key rfs_needed __read_mostly;
3559EXPORT_SYMBOL(rfs_needed);
Eric Dumazetadc93002011-11-17 03:13:26 +00003560
Ben Hutchingsc4454772011-01-19 11:03:53 +00003561static struct rps_dev_flow *
3562set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3563 struct rps_dev_flow *rflow, u16 next_cpu)
3564{
Eric Dumazeta31196b2015-04-25 09:35:24 -07003565 if (next_cpu < nr_cpu_ids) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00003566#ifdef CONFIG_RFS_ACCEL
3567 struct netdev_rx_queue *rxqueue;
3568 struct rps_dev_flow_table *flow_table;
3569 struct rps_dev_flow *old_rflow;
3570 u32 flow_id;
3571 u16 rxq_index;
3572 int rc;
3573
3574 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00003575 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3576 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00003577 goto out;
3578 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3579 if (rxq_index == skb_get_rx_queue(skb))
3580 goto out;
3581
3582 rxqueue = dev->_rx + rxq_index;
3583 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3584 if (!flow_table)
3585 goto out;
Tom Herbert61b905d2014-03-24 15:34:47 -07003586 flow_id = skb_get_hash(skb) & flow_table->mask;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003587 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3588 rxq_index, flow_id);
3589 if (rc < 0)
3590 goto out;
3591 old_rflow = rflow;
3592 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00003593 rflow->filter = rc;
3594 if (old_rflow->filter == rflow->filter)
3595 old_rflow->filter = RPS_NO_FILTER;
3596 out:
3597#endif
3598 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00003599 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003600 }
3601
Ben Hutchings09994d12011-10-03 04:42:46 +00003602 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003603 return rflow;
3604}
3605
Tom Herbert0a9627f2010-03-16 08:03:29 +00003606/*
3607 * get_rps_cpu is called from netif_receive_skb and returns the target
3608 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003609 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00003610 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003611static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3612 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003613{
Eric Dumazet567e4b72015-02-06 12:59:01 -08003614 const struct rps_sock_flow_table *sock_flow_table;
3615 struct netdev_rx_queue *rxqueue = dev->_rx;
Tom Herbertfec5e652010-04-16 16:01:27 -07003616 struct rps_dev_flow_table *flow_table;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003617 struct rps_map *map;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003618 int cpu = -1;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003619 u32 tcpu;
Tom Herbert61b905d2014-03-24 15:34:47 -07003620 u32 hash;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003621
Tom Herbert0a9627f2010-03-16 08:03:29 +00003622 if (skb_rx_queue_recorded(skb)) {
3623 u16 index = skb_get_rx_queue(skb);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003624
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003625 if (unlikely(index >= dev->real_num_rx_queues)) {
3626 WARN_ONCE(dev->real_num_rx_queues > 1,
3627 "%s received packet on queue %u, but number "
3628 "of RX queues is %u\n",
3629 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003630 goto done;
3631 }
Eric Dumazet567e4b72015-02-06 12:59:01 -08003632 rxqueue += index;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003633 }
3634
Eric Dumazet567e4b72015-02-06 12:59:01 -08003635 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3636
3637 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3638 map = rcu_dereference(rxqueue->rps_map);
3639 if (!flow_table && !map)
3640 goto done;
3641
Changli Gao2d47b452010-08-17 19:00:56 +00003642 skb_reset_network_header(skb);
Tom Herbert61b905d2014-03-24 15:34:47 -07003643 hash = skb_get_hash(skb);
3644 if (!hash)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003645 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003646
Tom Herbertfec5e652010-04-16 16:01:27 -07003647 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3648 if (flow_table && sock_flow_table) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003649 struct rps_dev_flow *rflow;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003650 u32 next_cpu;
3651 u32 ident;
Tom Herbertfec5e652010-04-16 16:01:27 -07003652
Eric Dumazet567e4b72015-02-06 12:59:01 -08003653 /* First check the global flow table for a match */
3654 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3655 if ((ident ^ hash) & ~rps_cpu_mask)
3656 goto try_rps;
3657
3658 next_cpu = ident & rps_cpu_mask;
3659
3660 /* OK, now we know there is a match,
3661 * we can look at the local (per receive queue) flow table
3662 */
Tom Herbert61b905d2014-03-24 15:34:47 -07003663 rflow = &flow_table->flows[hash & flow_table->mask];
Tom Herbertfec5e652010-04-16 16:01:27 -07003664 tcpu = rflow->cpu;
3665
Tom Herbertfec5e652010-04-16 16:01:27 -07003666 /*
3667 * If the desired CPU (where last recvmsg was done) is
3668 * different from current CPU (one in the rx-queue flow
3669 * table entry), switch if one of the following holds:
Eric Dumazeta31196b2015-04-25 09:35:24 -07003670 * - Current CPU is unset (>= nr_cpu_ids).
Tom Herbertfec5e652010-04-16 16:01:27 -07003671 * - Current CPU is offline.
3672 * - The current CPU's queue tail has advanced beyond the
3673 * last packet that was enqueued using this table entry.
3674 * This guarantees that all previous packets for the flow
3675 * have been dequeued, thus preserving in order delivery.
3676 */
3677 if (unlikely(tcpu != next_cpu) &&
Eric Dumazeta31196b2015-04-25 09:35:24 -07003678 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
Tom Herbertfec5e652010-04-16 16:01:27 -07003679 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003680 rflow->last_qtail)) >= 0)) {
3681 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003682 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003683 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003684
Eric Dumazeta31196b2015-04-25 09:35:24 -07003685 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003686 *rflowp = rflow;
3687 cpu = tcpu;
3688 goto done;
3689 }
3690 }
3691
Eric Dumazet567e4b72015-02-06 12:59:01 -08003692try_rps:
3693
Tom Herbert0a9627f2010-03-16 08:03:29 +00003694 if (map) {
Daniel Borkmann8fc54f62014-08-23 20:58:54 +02003695 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003696 if (cpu_online(tcpu)) {
3697 cpu = tcpu;
3698 goto done;
3699 }
3700 }
3701
3702done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003703 return cpu;
3704}
3705
Ben Hutchingsc4454772011-01-19 11:03:53 +00003706#ifdef CONFIG_RFS_ACCEL
3707
3708/**
3709 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3710 * @dev: Device on which the filter was set
3711 * @rxq_index: RX queue index
3712 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3713 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3714 *
3715 * Drivers that implement ndo_rx_flow_steer() should periodically call
3716 * this function for each installed filter and remove the filters for
3717 * which it returns %true.
3718 */
3719bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3720 u32 flow_id, u16 filter_id)
3721{
3722 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3723 struct rps_dev_flow_table *flow_table;
3724 struct rps_dev_flow *rflow;
3725 bool expire = true;
Eric Dumazeta31196b2015-04-25 09:35:24 -07003726 unsigned int cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003727
3728 rcu_read_lock();
3729 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3730 if (flow_table && flow_id <= flow_table->mask) {
3731 rflow = &flow_table->flows[flow_id];
3732 cpu = ACCESS_ONCE(rflow->cpu);
Eric Dumazeta31196b2015-04-25 09:35:24 -07003733 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
Ben Hutchingsc4454772011-01-19 11:03:53 +00003734 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3735 rflow->last_qtail) <
3736 (int)(10 * flow_table->mask)))
3737 expire = false;
3738 }
3739 rcu_read_unlock();
3740 return expire;
3741}
3742EXPORT_SYMBOL(rps_may_expire_flow);
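
/* Illustrative sketch, not part of this file: a driver implementing
 * ndo_rx_flow_steer() is expected to scan its installed filters
 * periodically, much as sfc's efx_filter_rfs_expire() does. my_dev,
 * my_filter and my_remove_hw_filter() are assumptions made up for the
 * example.
 *
 *	static void my_expire_rfs_filters(struct my_dev *md)
 *	{
 *		u16 i;
 *
 *		for (i = 0; i < md->nr_filters; i++) {
 *			struct my_filter *f = &md->filters[i];
 *
 *			if (!f->installed)
 *				continue;
 *			if (rps_may_expire_flow(md->netdev, f->rxq_index,
 *						f->flow_id, i))
 *				my_remove_hw_filter(md, f);
 *		}
 *	}
 */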
3743
3744#endif /* CONFIG_RFS_ACCEL */
3745
Tom Herbert0a9627f2010-03-16 08:03:29 +00003746/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003747static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003748{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003749 struct softnet_data *sd = data;
3750
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003751 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003752 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003753}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003754
Tom Herbertfec5e652010-04-16 16:01:27 -07003755#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003756
3757/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003758 * Check if this softnet_data structure belongs to another cpu.
3759 * If yes, queue it to our IPI list and return 1;
3760 * if no, return 0.
3761 */
3762static int rps_ipi_queued(struct softnet_data *sd)
3763{
3764#ifdef CONFIG_RPS
Christoph Lameter903ceff2014-08-17 12:30:35 -05003765 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003766
3767 if (sd != mysd) {
3768 sd->rps_ipi_next = mysd->rps_ipi_list;
3769 mysd->rps_ipi_list = sd;
3770
3771 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3772 return 1;
3773 }
3774#endif /* CONFIG_RPS */
3775 return 0;
3776}
3777
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}

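/*
 * Reading skb_flow_limit() above: fl->buckets[] counts, per hash bucket,
 * how many of the last FLOW_LIMIT_HISTORY enqueued packets fell into that
 * bucket.  Once the backlog is at least half full, any flow that owns
 * more than half of the history window starts being dropped, so a single
 * misbehaving flow cannot monopolise the backlog.  The feature is enabled
 * per cpu via /proc/sys/net/core/flow_limit_cpu_bitmap and sized via
 * net.core.flow_limit_table_len (see Documentation/networking/scaling.txt).
 */
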
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (!netif_running(skb->dev))
		goto drop;
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use a non-atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

drop:
	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;

		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}

/**
 * netif_rx - post buffer to the network code
 * @skb: buffer to post
 *
 * This function receives a packet from a device driver and queues it for
 * the upper (protocol) levels to process. It always succeeds. The buffer
 * may be dropped during processing for congestion control or by the
 * protocol layers.
 *
 * Return values:
 * NET_RX_SUCCESS (no congestion)
 * NET_RX_DROP (packet was dropped)
 */

int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);

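/*
 * Illustrative sketch (not part of this file): a non-NAPI driver would
 * typically call netif_rx() from its interrupt handler once it has built
 * an skb for the received frame.  The foo_* names are made up.
 *
 *	static irqreturn_t foo_rx_irq(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *		struct sk_buff *skb = foo_build_rx_skb(priv);
 *
 *		if (!skb)
 *			return IRQ_NONE;
 *		skb->protocol = eth_type_trans(skb, priv->netdev);
 *		netif_rx(skb);
 *		return IRQ_HANDLED;
 *	}
 */
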
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static __latent_entropy void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;

			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
				trace_consume_skb(skb);
			else
				trace_kfree_skb(skb, net_tx_action);

			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
				__kfree_skb(skb);
			else
				__kfree_skb_defer(skb);
		}

		__kfree_skb_flush();
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			spin_lock(root_lock);
			/* We need to make sure head->next_sched is read
			 * before clearing __QDISC_STATE_SCHED
			 */
			smp_mb__before_atomic();
			clear_bit(__QDISC_STATE_SCHED, &q->state);
			qdisc_run(q);
			spin_unlock(root_lock);
		}
	}
}

#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

static inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
	struct tcf_result cl_res;

	/* If there's at least one ingress present somewhere (so
	 * we get here via enabled static key), remaining devices
	 * that are not configured with an ingress qdisc will bail
	 * out here.
	 */
	if (!cl)
		return skb;
	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	skb->tc_at_ingress = 1;
	qdisc_bstats_cpu_update(cl->q, skb);

	switch (tcf_classify(skb, cl, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		qdisc_qstats_cpu_drop(cl->q);
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by cls/act_bpf, so
		 * we can safely push the L2 header back before
		 * redirecting to another netdev
		 */
		__skb_push(skb, skb->mac_len);
		skb_do_redirect(skb);
		return NULL;
	default:
		break;
	}
#endif /* CONFIG_NET_CLS_ACT */
	return skb;
}

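/*
 * For reference (userspace side, not part of this file): the path above
 * is only taken once an ingress or clsact qdisc has been configured on a
 * device, e.g. with iproute2:
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall action drop
 *
 * after which every frame received on eth0 runs through tcf_classify()
 * before any protocol handler can see it.
 */
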
/**
 * netdev_is_rx_handler_busy - check if receive handler is registered
 * @dev: device to check
 *
 * Check if a receive handler is already registered for a given device.
 * Return true if there is one.
 *
 * The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);

/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device. This handler will then be
 * called from __netif_receive_skb. A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 *
 * For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);

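/*
 * Minimal sketch of a user of this API; foo_* names are hypothetical
 * (bridge, bonding, macvlan and team are the in-tree users).  The handler
 * claims every packet arriving on the port device:
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct foo_port *port;
 *
 *		port = rcu_dereference(skb->dev->rx_handler_data);
 *		foo_queue_frame(port, skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 * registered under RTNL with:
 *
 *	err = netdev_rx_handler_register(port_dev, foo_handle_frame, port);
 */
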
/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section is guaranteed to also see a non NULL rx_handler_data.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{
#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_hook_ingress_active(skb)) {
		int ingress_retval;

		if (*pt_prev) {
			*ret = deliver_skb(skb, *pt_prev, orig_dev);
			*pt_prev = NULL;
		}

		rcu_read_lock();
		ingress_retval = nf_hook_ingress(skb);
		rcu_read_unlock();
		return ingress_retval;
	}
#endif /* CONFIG_NETFILTER_INGRESS */
	return 0;
}

static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	if (skb_skip_tc_classify(skb))
		goto skip_classify;

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_INGRESS
	if (static_key_false(&ingress_needed)) {
		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
		if (!skb)
			goto out;

		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
			goto out;
	}
#endif
	skb_reset_tc(skb);
skip_classify:
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		if (skb_vlan_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		if (!deliver_exact)
			atomic_long_inc(&skb->dev->rx_dropped);
		else
			atomic_long_inc(&skb->dev->rx_nohandler);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	return ret;
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned int noreclaim_flag;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		noreclaim_flag = memalloc_noreclaim_save();
		ret = __netif_receive_skb_core(skb, true);
		memalloc_noreclaim_restore(noreclaim_flag);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}

static struct static_key generic_xdp_needed __read_mostly;

static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct bpf_prog *new = xdp->prog;
	int ret = 0;

	switch (xdp->command) {
	case XDP_SETUP_PROG: {
		struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);

		rcu_assign_pointer(dev->xdp_prog, new);
		if (old)
			bpf_prog_put(old);

		if (old && !new) {
			static_key_slow_dec(&generic_xdp_needed);
		} else if (new && !old) {
			static_key_slow_inc(&generic_xdp_needed);
			dev_disable_lro(dev);
		}
		break;
	}

	case XDP_QUERY_PROG:
		xdp->prog_attached = !!rcu_access_pointer(dev->xdp_prog);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static u32 netif_receive_generic_xdp(struct sk_buff *skb,
				     struct bpf_prog *xdp_prog)
{
	struct xdp_buff xdp;
	u32 act = XDP_DROP;
	void *orig_data;
	int hlen, off;
	u32 mac_len;

	/* Reinjected packets coming from act_mirred or similar should
	 * not get XDP generic processing.
	 */
	if (skb_cloned(skb))
		return XDP_PASS;

	if (skb_linearize(skb))
		goto do_drop;

	/* The XDP program wants to see the packet starting at the MAC
	 * header.
	 */
	mac_len = skb->data - skb_mac_header(skb);
	hlen = skb_headlen(skb) + mac_len;
	xdp.data = skb->data - mac_len;
	xdp.data_end = xdp.data + hlen;
	xdp.data_hard_start = skb->data - skb_headroom(skb);
	orig_data = xdp.data;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	off = xdp.data - orig_data;
	if (off > 0)
		__skb_pull(skb, off);
	else if (off < 0)
		__skb_push(skb, -off);

	switch (act) {
	case XDP_TX:
		__skb_push(skb, mac_len);
		/* fall through */
	case XDP_PASS:
		break;

	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(skb->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
	do_drop:
		kfree_skb(skb);
		break;
	}

	return act;
}

/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior.
 */
static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	bool free_skb = true;
	int cpu, rc;

	txq = netdev_pick_tx(dev, skb, NULL);
	cpu = smp_processor_id();
	HARD_TX_LOCK(dev, txq, cpu);
	if (!netif_xmit_stopped(txq)) {
		rc = netdev_start_xmit(skb, dev, txq, 0);
		if (dev_xmit_complete(rc))
			free_skb = false;
	}
	HARD_TX_UNLOCK(dev, txq);
	if (free_skb) {
		trace_xdp_exception(dev, xdp_prog, XDP_TX);
		kfree_skb(skb);
	}
}

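/*
 * For reference (userspace side, not part of this file): generic-mode
 * XDP is what iproute2 attaches with the xdpgeneric keyword, e.g.
 *
 *	ip link set dev eth0 xdpgeneric obj xdp_prog.o sec xdp
 *
 * assuming xdp_prog.o is an ELF object carrying the program in its "xdp"
 * section; the same program attached with "xdpdrv" would instead run in
 * the driver, before an skb is built.
 */
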
static int netif_receive_skb_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	rcu_read_lock();

	if (static_key_false(&generic_xdp_needed)) {
		struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog);

		if (xdp_prog) {
			u32 act = netif_receive_generic_xdp(skb, xdp_prog);

			if (act != XDP_PASS) {
				rcu_read_unlock();
				if (act == XDP_TX)
					generic_xdp_tx(skb, xdp_prog);
				return NET_RX_DROP;
			}
		}
	}

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}

/**
 * netif_receive_skb - process receive buffer from network
 * @skb: buffer to process
 *
 * netif_receive_skb() is the main receive data processing function.
 * It always succeeds. The buffer may be dropped during processing
 * for congestion control or by the protocol layers.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);

DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
	struct sk_buff *skb, *tmp;
	struct softnet_data *sd;

	local_bh_disable();
	sd = this_cpu_ptr(&softnet_data);

	local_irq_disable();
	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);
	local_irq_enable();

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	local_bh_enable();
}

static void flush_all_backlogs(void)
{
	unsigned int cpu;

	get_online_cpus();

	for_each_online_cpu(cpu)
		queue_work_on(cpu, system_highpri_wq,
			      per_cpu_ptr(&flush_works, cpu));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&flush_works, cpu));

	put_online_cpus();
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}

/* napi->gro_list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= skb_metadata_dst_cmp(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	pinfo->frags[0].page_offset += grow;
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
		NAPI_GRO_CB(skb)->recursion_counter = 0;
		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->is_atomic = 1;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {
			pp = &nskb->next;
			nskb = *pp;
		}
		*pp = NULL;
		nskb->next = NULL;
		napi_gro_complete(nskb);
	} else {
		napi->gro_count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

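/*
 * Minimal sketch of how a protocol plugs into the offload lists walked
 * above; foo_* names and ETH_P_FOO are hypothetical (real registrations
 * live in protocol code, e.g. net/ipv4/af_inet.c for ETH_P_IP):
 *
 *	static struct packet_offload foo_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.callbacks = {
 *			.gro_receive	= foo_gro_receive,
 *			.gro_complete	= foo_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&foo_packet_offload);
 */
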
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
			skb_dst_drop(skb);
			secpath_reset(skb);
			kmem_cache_free(skbuff_head_cache, skb);
		} else {
			__kfree_skb(skb);
		}
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);

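/*
 * Illustrative driver-side pattern (hypothetical foo_* names): a NAPI
 * poll routine feeds received frames through GRO instead of calling
 * netif_receive_skb() directly:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_rx_ring *ring = container_of(napi,
 *						struct foo_rx_ring, napi);
 *		int done = 0;
 *
 *		while (done < budget) {
 *			struct sk_buff *skb = foo_fetch_rx_skb(ring);
 *
 *			if (!skb)
 *				break;
 *			skb->protocol = eth_type_trans(skb, ring->netdev);
 *			napi_gro_receive(napi, skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */
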
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;
	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
	secpath_reset(skb);

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	eth = skb_gro_header_fast(skb, 0);
	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);

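/*
 * Illustrative pattern for the frag-based interface above (hypothetical
 * names): a driver that receives directly into pages attaches each page
 * to the skb cached by napi_get_frags() and never touches skb->data:
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += PAGE_SIZE;
 *	napi_gro_frags(napi);
 */
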
/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);

static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
	while (remsd) {
		struct softnet_data *next = remsd->rps_ipi_next;

		if (cpu_online(remsd->cpu))
			smp_call_function_single_async(remsd->cpu, &remsd->csd);
		remsd = next;
	}
#endif
}

/*
 * net_rps_action_and_irq_enable sends any pending IPIs for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPIs to kick RPS processing on remote cpus. */
		net_rps_send_ipi(remsd);
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
	bool again = true;
	int work = 0;

	/* Check if we have pending IPIs; it is better to send them now
	 * than to wait until net_rx_action() ends.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = dev_rx_weight;
	while (again) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			rcu_read_lock();
			__netif_receive_skb(skb);
			rcu_read_unlock();
			input_queue_head_incr(sd);
			if (++work >= quota)
				return work;
		}

		local_irq_disable();
		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			again = false;
		} else {
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
		}
		rps_unlock(sd);
		local_irq_enable();
	}

	return work;
}

Stephen Hemmingerbea33482007-10-03 16:41:36 -07005125/**
5126 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005127 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005128 *
Eric Dumazetbc9ad162014-10-28 18:05:13 -07005129 * The entry's receive function will be scheduled to run.
5130 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005131 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08005132void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005133{
5134 unsigned long flags;
5135
5136 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05005137 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005138 local_irq_restore(flags);
5139}
5140EXPORT_SYMBOL(__napi_schedule);
5141
Eric Dumazetbc9ad162014-10-28 18:05:13 -07005142/**
Eric Dumazet39e6c822017-02-28 10:34:50 -08005143 * napi_schedule_prep - check if napi can be scheduled
5144 * @n: napi context
5145 *
5146 * Test if NAPI routine is already running, and if not mark
5147 * it as running. This is used as a condition variable
5148 * insure only one NAPI poll instance runs. We also make
5149 * sure there is no pending NAPI disable.
5150 */
5151bool napi_schedule_prep(struct napi_struct *n)
5152{
5153 unsigned long val, new;
5154
5155 do {
5156 val = READ_ONCE(n->state);
5157 if (unlikely(val & NAPIF_STATE_DISABLE))
5158 return false;
5159 new = val | NAPIF_STATE_SCHED;
5160
5161 /* Sets STATE_MISSED bit if STATE_SCHED was already set
5162 * This was suggested by Alexander Duyck, as compiler
5163 * emits better code than :
5164 * if (val & NAPIF_STATE_SCHED)
5165 * new |= NAPIF_STATE_MISSED;
5166 */
5167 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5168 NAPIF_STATE_MISSED;
5169 } while (cmpxchg(&n->state, val, new) != val);
5170
5171 return !(val & NAPIF_STATE_SCHED);
5172}
5173EXPORT_SYMBOL(napi_schedule_prep);
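
/* Worked example (illustrative only): the branch-free statement above is
 * equivalent to the commented-out "if".  Because NAPIF_STATE_SCHED is a
 * single bit, the division yields exactly 0 or 1, which then scales
 * NAPIF_STATE_MISSED.  "missed_bit_demo" is a hypothetical helper name.
 */
static unsigned long missed_bit_demo(unsigned long val)
{
	/* 1 if NAPIF_STATE_SCHED was set in val, else 0 */
	unsigned long sched = (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED;

	return sched * NAPIF_STATE_MISSED;	/* the MISSED bit, or zero */
}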

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
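
/* Usage sketch (illustrative only): a minimal private struct and the
 * canonical hard-irq handler pattern.  Everything prefixed "my_" is
 * hypothetical driver code, not kernel API; real drivers usually call the
 * napi_schedule_irqoff() wrapper, which combines the two calls below.
 */
struct my_nic_priv {
	struct napi_struct napi;
	/* device registers, rings, ... would live here */
};

static void my_nic_mask_rx_irqs(struct my_nic_priv *priv) { /* stub */ }
static void my_nic_unmask_rx_irqs(struct my_nic_priv *priv) { /* stub */ }

static irqreturn_t my_nic_irq(int irq, void *dev_id)
{
	struct my_nic_priv *priv = dev_id;

	my_nic_mask_rx_irqs(priv);		/* quiesce the device first */
	if (napi_schedule_prep(&priv->napi))
		__napi_schedule_irqoff(&priv->napi);	/* hard irqs are masked here */
	return IRQ_HANDLED;
}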

bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case it is running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))
		return false;

	if (n->gro_list) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
		else
			napi_gro_flush(n, false);
	}
	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);
	}

	do {
		val = READ_ONCE(n->state);

		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (cmpxchg(&n->state, val, new) != val);

	if (unlikely(val & NAPIF_STATE_MISSED)) {
		__napi_schedule(n);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(napi_complete_done);
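
/* Usage sketch (illustrative only, reusing the "my_nic" sketch above):
 * the shape of a driver poll() method.  Comparing work against budget and
 * re-enabling device interrupts only when napi_complete_done() returns
 * true is the real contract; my_nic_clean_rx() stands in for ring cleanup.
 */
static int my_nic_clean_rx(struct my_nic_priv *priv, int budget)
{
	return 0;	/* stub: would process up to budget packets */
}

static int my_nic_poll(struct napi_struct *napi, int budget)
{
	struct my_nic_priv *priv = container_of(napi, struct my_nic_priv, napi);
	int work = my_nic_clean_rx(priv, budget);	/* work <= budget */

	if (work < budget && napi_complete_done(napi, work))
		my_nic_unmask_rx_irqs(priv);	/* NAPI really is done */

	return work;
}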

/* must be called under rcu_read_lock(), as we don't take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}

#if defined(CONFIG_NET_RX_BUSY_POLL)

#define BUSY_POLL_BUDGET 8

static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
{
	int rc;

	/* Busy polling means there is a high chance the device driver's hard
	 * irq could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit()
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	local_bh_disable();

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, BUSY_POLL_BUDGET);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == BUSY_POLL_BUDGET)
		__napi_schedule(napi);
	local_bh_enable();
	if (local_softirq_pending())
		do_softirq();
}

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg)
{
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

restart:
	napi_poll = NULL;

	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (!napi)
		goto out;

	preempt_disable();
	for (;;) {
		int work = 0;

		local_bh_disable();
		if (!napi_poll) {
			unsigned long val = READ_ONCE(napi->state);

			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
				   NAPIF_STATE_IN_BUSY_POLL))
				goto count;
			if (cmpxchg(&napi->state, val,
				    val | NAPIF_STATE_IN_BUSY_POLL |
					  NAPIF_STATE_SCHED) != val)
				goto count;
			have_poll_lock = netpoll_poll_lock(napi);
			napi_poll = napi->poll;
		}
		work = napi_poll(napi, BUSY_POLL_BUDGET);
		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
count:
		if (work > 0)
			__NET_ADD_STATS(dev_net(napi->dev),
					LINUX_MIB_BUSYPOLLRXPACKETS, work);
		local_bh_enable();

		if (!loop_end || loop_end(loop_end_arg, start_time))
			break;

		if (unlikely(need_resched())) {
			if (napi_poll)
				busy_poll_stop(napi, have_poll_lock);
			preempt_enable();
			rcu_read_unlock();
			cond_resched();
			if (loop_end(loop_end_arg, start_time))
				return;
			goto restart;
		}
		cpu_relax();
	}
	if (napi_poll)
		busy_poll_stop(napi, have_poll_lock);
	preempt_enable();
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);

#endif /* CONFIG_NET_RX_BUSY_POLL */
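
/* Usage sketch (illustrative only): a loop_end callback that bounds the
 * busy loop to a caller-chosen budget.  It assumes busy_loop_current_time()
 * ticks in roughly microsecond units; "my_poll_ctx" and my_loop_end() are
 * hypothetical names.  A caller would invoke:
 *	napi_busy_loop(napi_id, my_loop_end, &ctx);
 */
struct my_poll_ctx {
	unsigned long timeout;	/* in busy_loop_current_time() units */
};

static bool my_loop_end(void *arg, unsigned long start_time)
{
	struct my_poll_ctx *ctx = arg;

	/* stop once the budgeted time has elapsed (wrap-safe comparison) */
	return time_after(busy_loop_current_time(),
			  start_time + ctx->timeout);
}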

static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}

/* Warning: the caller is responsible for making sure an RCU grace period
 * is respected before freeing the memory containing @napi.
 */
bool napi_hash_del(struct napi_struct *napi)
{
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	}
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
}
EXPORT_SYMBOL_GPL(napi_hash_del);

static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note: we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (napi->gro_list && !napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
		__napi_schedule_irqoff(napi);

	return HRTIMER_NORESTART;
}

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
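
/* Usage sketch (illustrative only, continuing the "my_nic" sketch): how a
 * driver's probe path registers the poll method.  NAPI_POLL_WEIGHT (64)
 * is the weight nearly every driver should pass.
 */
static void my_nic_setup_napi(struct net_device *netdev,
			      struct my_nic_priv *priv)
{
	netif_napi_add(netdev, &priv->napi, my_nic_poll, NAPI_POLL_WEIGHT);
	/* The instance starts with NAPI_STATE_SCHED set; drivers typically
	 * call napi_enable() from their open path before scheduling it.
	 */
}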

void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	might_sleep();
	if (napi_hash_del(napi))
		synchronize_net();
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);
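
/* Usage sketch (illustrative only, closing the "my_nic" sketch): teardown
 * ordering.  Stop the instance before deleting it, and free the netdev
 * only afterwards; netif_napi_del() may sleep, so this runs in process
 * context.
 */
static void my_nic_remove(struct net_device *netdev,
			  struct my_nic_priv *priv)
{
	napi_disable(&priv->napi);	/* waits for an in-flight poll to finish */
	unregister_netdev(netdev);
	netif_napi_del(&priv->napi);
	free_netdev(netdev);
}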

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* Flush packets that are too old.
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}

static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(netdev_budget_usecs);
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				goto out;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows an average
		 * latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
out:
	__kfree_skb_flush();
}

struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
{
	struct net_device *dev = data;

	return upper_dev == dev;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is.  Note that this checks only the immediate upper
 * device, not the complete stack of devices.  The caller must hold the
 * RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					     upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is.  Note that this checks the entire upper device chain.
 * The caller must hold the RCU read lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					       upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is.  The caller must hold the RTNL lock.
 */
static bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL in case
 * it's not there.  The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is.  The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position.  The caller must hold the RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);

static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *udev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.upper,
	     udev = netdev_next_upper_dev_rcu(dev, &iter);
	     udev;
	     udev = netdev_next_upper_dev_rcu(dev, &iter)) {
		/* first is the upper device itself */
		ret = fn(udev, data);
		if (ret)
			return ret;

		/* then look at all of its upper devices */
		ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
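
/* Usage sketch (illustrative only): a walker callback that looks for an
 * upper device of a particular kind.  "my_is_bond_upper" and
 * "my_dev_under_bond" are hypothetical; the walk stops as soon as the
 * callback returns non-zero.
 */
static int my_is_bond_upper(struct net_device *upper_dev, void *data)
{
	return netif_is_bond_master(upper_dev);
}

static bool my_dev_under_bond(struct net_device *dev)
{
	bool ret;

	rcu_read_lock();
	ret = netdev_walk_all_upper_dev_rcu(dev, my_is_bond_upper, NULL);
	rcu_read_unlock();
	return ret;
}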

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position.  The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position.  The caller must hold the RCU read
 * lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour list,
 * starting from iter position.  The caller must either hold the RTNL lock
 * or its own locking that guarantees that the neighbour lower list will
 * remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);

static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					void *data),
			      void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev_rcu(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list.  The caller must hold the RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL in case
 * it's not there.  The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr += 1;
		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);

		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that the master link is always the first item in the list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info)
{
	struct netdev_notifier_changeupper_info changeupper_info;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check that dev is not an upper device of upper_dev. */
	if (netdev_has_upper_dev(upper_dev, dev))
		return -EBUSY;

	if (netdev_has_upper_dev(dev, upper_dev))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = master;
	changeupper_info.linking = true;
	changeupper_info.upper_info = upper_info;

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback;

	return 0;

rollback:
	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to a device which is upper to this one.  The caller must hold
 * the RTNL lock.  On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 *
 * Adds a link to a device which is upper to this one.  In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well.  The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned.  On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info)
{
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
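
/* Usage sketch (illustrative only): how a bonding-style driver would
 * attach a slave under RTNL.  "bond_dev", "slave_dev" and the private
 * pointer are hypothetical; real callers also undo the link with
 * netdev_upper_dev_unlink() on their teardown path.
 */
static int my_bond_attach_slave(struct net_device *bond_dev,
				struct net_device *slave_dev,
				void *slave_priv)
{
	ASSERT_RTNL();

	/* slave_priv becomes retrievable via netdev_lower_dev_get_private() */
	return netdev_master_upper_dev_link(slave_dev, bond_dev,
					    slave_priv, NULL);
}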
6268
6269/**
6270 * netdev_upper_dev_unlink - Removes a link to upper device
6271 * @dev: device
6272 * @upper_dev: new upper device
6273 *
6274 * Removes a link to device which is upper to this one. The caller must hold
6275 * the RTNL lock.
6276 */
6277void netdev_upper_dev_unlink(struct net_device *dev,
6278 struct net_device *upper_dev)
6279{
Jiri Pirko0e4ead92015-08-27 09:31:18 +02006280 struct netdev_notifier_changeupper_info changeupper_info;
tchardingf4563a72017-02-09 17:56:07 +11006281
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006282 ASSERT_RTNL();
6283
Jiri Pirko0e4ead92015-08-27 09:31:18 +02006284 changeupper_info.upper_dev = upper_dev;
6285 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
6286 changeupper_info.linking = false;
6287
Jiri Pirko573c7ba2015-10-16 14:01:22 +02006288 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
6289 &changeupper_info.info);
6290
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006291 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02006292
Jiri Pirko0e4ead92015-08-27 09:31:18 +02006293 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
6294 &changeupper_info.info);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006295}
6296EXPORT_SYMBOL(netdev_upper_dev_unlink);
6297
Moni Shoua61bd3852015-02-03 16:48:29 +02006298/**
6299 * netdev_bonding_info_change - Dispatch event about slave change
6300 * @dev: device
Masanari Iida4a26e4532015-02-14 22:26:34 +09006301 * @bonding_info: info to dispatch
Moni Shoua61bd3852015-02-03 16:48:29 +02006302 *
6303 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
6304 * The caller must hold the RTNL lock.
6305 */
6306void netdev_bonding_info_change(struct net_device *dev,
6307 struct netdev_bonding_info *bonding_info)
6308{
6309 struct netdev_notifier_bonding_info info;
6310
6311 memcpy(&info.bonding_info, bonding_info,
6312 sizeof(struct netdev_bonding_info));
6313 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
6314 &info.info);
6315}
6316EXPORT_SYMBOL(netdev_bonding_info_change);
6317
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08006318static void netdev_adjacent_add_links(struct net_device *dev)
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006319{
6320 struct netdev_adjacent *iter;
6321
6322 struct net *net = dev_net(dev);
6323
6324 list_for_each_entry(iter, &dev->adj_list.upper, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08006325 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006326 continue;
6327 netdev_adjacent_sysfs_add(iter->dev, dev,
6328 &iter->dev->adj_list.lower);
6329 netdev_adjacent_sysfs_add(dev, iter->dev,
6330 &dev->adj_list.upper);
6331 }
6332
6333 list_for_each_entry(iter, &dev->adj_list.lower, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08006334 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006335 continue;
6336 netdev_adjacent_sysfs_add(iter->dev, dev,
6337 &iter->dev->adj_list.upper);
6338 netdev_adjacent_sysfs_add(dev, iter->dev,
6339 &dev->adj_list.lower);
6340 }
6341}
6342
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08006343static void netdev_adjacent_del_links(struct net_device *dev)
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006344{
6345 struct netdev_adjacent *iter;
6346
6347 struct net *net = dev_net(dev);
6348
6349 list_for_each_entry(iter, &dev->adj_list.upper, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08006350 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006351 continue;
6352 netdev_adjacent_sysfs_del(iter->dev, dev->name,
6353 &iter->dev->adj_list.lower);
6354 netdev_adjacent_sysfs_del(dev, iter->dev->name,
6355 &dev->adj_list.upper);
6356 }
6357
6358 list_for_each_entry(iter, &dev->adj_list.lower, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08006359 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006360 continue;
6361 netdev_adjacent_sysfs_del(iter->dev, dev->name,
6362 &iter->dev->adj_list.upper);
6363 netdev_adjacent_sysfs_del(dev, iter->dev->name,
6364 &dev->adj_list.lower);
6365 }
6366}
6367
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01006368void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
Veaceslav Falico402dae92013-09-25 09:20:09 +02006369{
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01006370 struct netdev_adjacent *iter;
Veaceslav Falico402dae92013-09-25 09:20:09 +02006371
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006372 struct net *net = dev_net(dev);
6373
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01006374 list_for_each_entry(iter, &dev->adj_list.upper, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08006375 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006376 continue;
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01006377 netdev_adjacent_sysfs_del(iter->dev, oldname,
6378 &iter->dev->adj_list.lower);
6379 netdev_adjacent_sysfs_add(iter->dev, dev,
6380 &iter->dev->adj_list.lower);
6381 }
Veaceslav Falico402dae92013-09-25 09:20:09 +02006382
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01006383 list_for_each_entry(iter, &dev->adj_list.lower, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08006384 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04006385 continue;
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01006386 netdev_adjacent_sysfs_del(iter->dev, oldname,
6387 &iter->dev->adj_list.upper);
6388 netdev_adjacent_sysfs_add(iter->dev, dev,
6389 &iter->dev->adj_list.upper);
6390 }
Veaceslav Falico402dae92013-09-25 09:20:09 +02006391}
Veaceslav Falico402dae92013-09-25 09:20:09 +02006392
6393void *netdev_lower_dev_get_private(struct net_device *dev,
6394 struct net_device *lower_dev)
6395{
6396 struct netdev_adjacent *lower;
6397
6398 if (!lower_dev)
6399 return NULL;
Michal Kubeček6ea29da2015-09-24 10:59:05 +02006400 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
Veaceslav Falico402dae92013-09-25 09:20:09 +02006401 if (!lower)
6402 return NULL;
6403
6404 return lower->private;
6405}
6406EXPORT_SYMBOL(netdev_lower_dev_get_private);
6407

Sabrina Dubroca952fcfd2016-08-12 16:10:33 +02006409int dev_get_nest_level(struct net_device *dev)
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006410{
6411 struct net_device *lower = NULL;
6412 struct list_head *iter;
6413 int max_nest = -1;
6414 int nest;
6415
6416 ASSERT_RTNL();
6417
6418 netdev_for_each_lower_dev(dev, lower, iter) {
Sabrina Dubroca952fcfd2016-08-12 16:10:33 +02006419 nest = dev_get_nest_level(lower);
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006420 if (max_nest < nest)
6421 max_nest = nest;
6422 }
6423
Sabrina Dubroca952fcfd2016-08-12 16:10:33 +02006424 return max_nest + 1;
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006425}
6426EXPORT_SYMBOL(dev_get_nest_level);
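
/* Worked example for the recursion above: a device with no lower
 * devices never enters the loop, so max_nest stays -1 and the function
 * returns 0. A bond over such a device returns 0 + 1 = 1, and a VLAN
 * stacked on that bond returns 1 + 1 = 2.
 */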
6427
Jiri Pirko04d48262015-12-03 12:12:15 +01006428/**
6429 * netdev_lower_state_changed - Dispatch event about lower device state change
6430 * @lower_dev: device
6431 * @lower_state_info: state to dispatch
6432 *
6433 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
6434 * The caller must hold the RTNL lock.
6435 */
6436void netdev_lower_state_changed(struct net_device *lower_dev,
6437 void *lower_state_info)
6438{
6439 struct netdev_notifier_changelowerstate_info changelowerstate_info;
6440
6441 ASSERT_RTNL();
6442 changelowerstate_info.lower_state_info = lower_state_info;
6443 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
6444 &changelowerstate_info.info);
6445}
6446EXPORT_SYMBOL(netdev_lower_state_changed);
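
/* Usage sketch, with hypothetical names and field values: a master
 * driver in the team/bonding mould might report a port's state like
 * this (assuming struct netdev_lower_state_info as declared in
 * netdevice.h).
 *
 *	struct netdev_lower_state_info info = {
 *		.link_up    = 1,
 *		.tx_enabled = 1,
 *	};
 *
 *	rtnl_lock();
 *	netdev_lower_state_changed(port_dev, &info);
 *	rtnl_unlock();
 */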
6447
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006448static void dev_change_rx_flags(struct net_device *dev, int flags)
6449{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006450 const struct net_device_ops *ops = dev->netdev_ops;
6451
Vlad Yasevichd2615bf2013-11-19 20:47:15 -05006452 if (ops->ndo_change_rx_flags)
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006453 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006454}
6455
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006456static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
Patrick McHardy4417da62007-06-27 01:28:10 -07006457{
Eric Dumazetb536db92011-11-30 21:42:26 +00006458 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06006459 kuid_t uid;
6460 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07006461
Patrick McHardy24023452007-07-14 18:51:31 -07006462 ASSERT_RTNL();
6463
Wang Chendad9b332008-06-18 01:48:28 -07006464 dev->flags |= IFF_PROMISC;
6465 dev->promiscuity += inc;
6466 if (dev->promiscuity == 0) {
6467 /*
6468 * Avoid overflow.
6469 * If inc causes overflow, leave promiscuity untouched and return an error.
6470 */
6471 if (inc < 0)
6472 dev->flags &= ~IFF_PROMISC;
6473 else {
6474 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006475 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
6476 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07006477 return -EOVERFLOW;
6478 }
6479 }
Patrick McHardy4417da62007-06-27 01:28:10 -07006480 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006481 pr_info("device %s %s promiscuous mode\n",
6482 dev->name,
6483 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11006484 if (audit_enabled) {
6485 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05006486 audit_log(current->audit_context, GFP_ATOMIC,
6487 AUDIT_ANOM_PROMISCUOUS,
6488 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6489 dev->name, (dev->flags & IFF_PROMISC),
6490 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07006491 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06006492 from_kuid(&init_user_ns, uid),
6493 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05006494 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11006495 }
Patrick McHardy24023452007-07-14 18:51:31 -07006496
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006497 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07006498 }
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006499 if (notify)
6500 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
Wang Chendad9b332008-06-18 01:48:28 -07006501 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07006502}
6503
Linus Torvalds1da177e2005-04-16 15:20:36 -07006504/**
6505 * dev_set_promiscuity - update promiscuity count on a device
6506 * @dev: device
6507 * @inc: modifier
6508 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07006509 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006510 * remains above zero the interface remains promiscuous. Once it hits zero
6511 * the device reverts to normal filtering operation. A negative inc
6512 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07006513 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006514 */
Wang Chendad9b332008-06-18 01:48:28 -07006515int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006516{
Eric Dumazetb536db92011-11-30 21:42:26 +00006517 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07006518 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006519
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006520 err = __dev_set_promiscuity(dev, inc, true);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07006521 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07006522 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07006523 if (dev->flags != old_flags)
6524 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07006525 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006526}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006527EXPORT_SYMBOL(dev_set_promiscuity);
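
/* Usage sketch ("example_dev" is hypothetical): because the device
 * keeps a count rather than a flag, every +1 must eventually be paired
 * with a -1, all under RTNL.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(example_dev, 1);
 *	...
 *	if (!err)
 *		dev_set_promiscuity(example_dev, -1);
 *	rtnl_unlock();
 */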
Linus Torvalds1da177e2005-04-16 15:20:36 -07006528
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006529static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006530{
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006531 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006532
Patrick McHardy24023452007-07-14 18:51:31 -07006533 ASSERT_RTNL();
6534
Linus Torvalds1da177e2005-04-16 15:20:36 -07006535 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07006536 dev->allmulti += inc;
6537 if (dev->allmulti == 0) {
6538 /*
6539 * Avoid overflow.
6540 * If inc causes overflow, untouch allmulti and return error.
6541 */
6542 if (inc < 0)
6543 dev->flags &= ~IFF_ALLMULTI;
6544 else {
6545 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006546 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
6547 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07006548 return -EOVERFLOW;
6549 }
6550 }
Patrick McHardy24023452007-07-14 18:51:31 -07006551 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006552 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07006553 dev_set_rx_mode(dev);
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006554 if (notify)
6555 __dev_notify_flags(dev, old_flags,
6556 dev->gflags ^ old_gflags);
Patrick McHardy24023452007-07-14 18:51:31 -07006557 }
Wang Chendad9b332008-06-18 01:48:28 -07006558 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07006559}
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006560
6561/**
6562 * dev_set_allmulti - update allmulti count on a device
6563 * @dev: device
6564 * @inc: modifier
6565 *
6566 * Add or remove reception of all multicast frames to a device. While the
6567 * count in the device remains above zero the interface remains listening
6568 * to all multicast frames. Once it hits zero the device reverts to normal
6569 * filtering operation. A negative @inc value is used to drop the counter
6570 * when releasing a resource needing all multicasts.
6571 * Return 0 if successful or a negative errno code on error.
6572 */
6573
6574int dev_set_allmulti(struct net_device *dev, int inc)
6575{
6576 return __dev_set_allmulti(dev, inc, true);
6577}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006578EXPORT_SYMBOL(dev_set_allmulti);
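
/* Usage sketch, mirroring the promiscuity example above: a resource
 * that needed all-multicast reception drops its reference on release.
 *
 *	ASSERT_RTNL();
 *	dev_set_allmulti(example_dev, 1);
 *	...
 *	dev_set_allmulti(example_dev, -1);
 */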
Patrick McHardy4417da62007-06-27 01:28:10 -07006579
6580/*
6581 * Upload unicast and multicast address lists to device and
6582 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08006583 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07006584 * are present.
6585 */
6586void __dev_set_rx_mode(struct net_device *dev)
6587{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006588 const struct net_device_ops *ops = dev->netdev_ops;
6589
Patrick McHardy4417da62007-06-27 01:28:10 -07006590 /* dev_open will call this function so the list will stay sane. */
6591 if (!(dev->flags&IFF_UP))
6592 return;
6593
6594 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09006595 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07006596
Jiri Pirko01789342011-08-16 06:29:00 +00006597 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07006598	/* Unicast address changes may only happen under the rtnl,
6599 * therefore calling __dev_set_promiscuity here is safe.
6600 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08006601 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006602 __dev_set_promiscuity(dev, 1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07006603 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08006604 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006605 __dev_set_promiscuity(dev, -1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07006606 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07006607 }
Patrick McHardy4417da62007-06-27 01:28:10 -07006608 }
Jiri Pirko01789342011-08-16 06:29:00 +00006609
6610 if (ops->ndo_set_rx_mode)
6611 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07006612}
6613
6614void dev_set_rx_mode(struct net_device *dev)
6615{
David S. Millerb9e40852008-07-15 00:15:08 -07006616 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07006617 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07006618 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006619}
6620
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006621/**
6622 * dev_get_flags - get flags reported to userspace
6623 * @dev: device
6624 *
6625 * Get the combination of flag bits exported through APIs to userspace.
6626 */
Eric Dumazet95c96172012-04-15 05:58:06 +00006627unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006628{
Eric Dumazet95c96172012-04-15 05:58:06 +00006629 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006630
6631 flags = (dev->flags & ~(IFF_PROMISC |
6632 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08006633 IFF_RUNNING |
6634 IFF_LOWER_UP |
6635 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07006636 (dev->gflags & (IFF_PROMISC |
6637 IFF_ALLMULTI));
6638
Stefan Rompfb00055a2006-03-20 17:09:11 -08006639 if (netif_running(dev)) {
6640 if (netif_oper_up(dev))
6641 flags |= IFF_RUNNING;
6642 if (netif_carrier_ok(dev))
6643 flags |= IFF_LOWER_UP;
6644 if (netif_dormant(dev))
6645 flags |= IFF_DORMANT;
6646 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006647
6648 return flags;
6649}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006650EXPORT_SYMBOL(dev_get_flags);
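
/* Usage sketch ("example_dev" is hypothetical): IFF_RUNNING in the
 * returned word reflects operational state, not just administrative
 * IFF_UP.
 *
 *	unsigned int flags = dev_get_flags(example_dev);
 *
 *	if ((flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
 *		pr_info("%s is up and operational\n", example_dev->name);
 */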
Linus Torvalds1da177e2005-04-16 15:20:36 -07006651
Patrick McHardybd380812010-02-26 06:34:53 +00006652int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006653{
Eric Dumazetb536db92011-11-30 21:42:26 +00006654 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00006655 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006656
Patrick McHardy24023452007-07-14 18:51:31 -07006657 ASSERT_RTNL();
6658
Linus Torvalds1da177e2005-04-16 15:20:36 -07006659 /*
6660 * Set the flags on our device.
6661 */
6662
6663 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6664 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6665 IFF_AUTOMEDIA)) |
6666 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6667 IFF_ALLMULTI));
6668
6669 /*
6670 * Load in the correct multicast list now the flags have changed.
6671 */
6672
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006673 if ((old_flags ^ flags) & IFF_MULTICAST)
6674 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07006675
Patrick McHardy4417da62007-06-27 01:28:10 -07006676 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006677
6678 /*
6679	 * Have we downed the interface? We handle IFF_UP ourselves
6680 * according to user attempts to set it, rather than blindly
6681 * setting it.
6682 */
6683
6684 ret = 0;
Peter Pan(潘卫平)d215d102014-06-16 21:57:22 +08006685 if ((old_flags ^ flags) & IFF_UP)
Patrick McHardybd380812010-02-26 06:34:53 +00006686 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006687
Linus Torvalds1da177e2005-04-16 15:20:36 -07006688 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006689 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006690 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006691
Linus Torvalds1da177e2005-04-16 15:20:36 -07006692 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006693
6694 if (__dev_set_promiscuity(dev, inc, false) >= 0)
6695 if (dev->flags != old_flags)
6696 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006697 }
6698
6699 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
tchardingeb13da12017-02-09 17:56:06 +11006700	 * is important. Some (broken) drivers set IFF_PROMISC when
6701	 * IFF_ALLMULTI is requested, without asking us and without reporting.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006702 */
6703 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006704 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6705
Linus Torvalds1da177e2005-04-16 15:20:36 -07006706 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006707 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006708 }
6709
Patrick McHardybd380812010-02-26 06:34:53 +00006710 return ret;
6711}
6712
Nicolas Dichtela528c212013-09-25 12:02:44 +02006713void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6714 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00006715{
6716 unsigned int changes = dev->flags ^ old_flags;
6717
Nicolas Dichtela528c212013-09-25 12:02:44 +02006718 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006719 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02006720
Patrick McHardybd380812010-02-26 06:34:53 +00006721 if (changes & IFF_UP) {
6722 if (dev->flags & IFF_UP)
6723 call_netdevice_notifiers(NETDEV_UP, dev);
6724 else
6725 call_netdevice_notifiers(NETDEV_DOWN, dev);
6726 }
6727
6728 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00006729 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
6730 struct netdev_notifier_change_info change_info;
6731
6732 change_info.flags_changed = changes;
6733 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
6734 &change_info.info);
6735 }
Patrick McHardybd380812010-02-26 06:34:53 +00006736}
6737
6738/**
6739 * dev_change_flags - change device settings
6740 * @dev: device
6741 * @flags: device state flags
6742 *
6743 *	Change settings on a device based on the supplied state flags.
6744 *	The flags are in the userspace exported format.
6745 */
Eric Dumazetb536db92011-11-30 21:42:26 +00006746int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00006747{
Eric Dumazetb536db92011-11-30 21:42:26 +00006748 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006749 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00006750
6751 ret = __dev_change_flags(dev, flags);
6752 if (ret < 0)
6753 return ret;
6754
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006755 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02006756 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006757 return ret;
6758}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006759EXPORT_SYMBOL(dev_change_flags);
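
/* Usage sketch: since @flags is the complete userspace-format word, a
 * caller normally reads the current flags and modifies them, e.g. to
 * administratively bring a (hypothetical) device up:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(example_dev,
 *			       dev_get_flags(example_dev) | IFF_UP);
 *	rtnl_unlock();
 */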
Linus Torvalds1da177e2005-04-16 15:20:36 -07006760
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006761static int __dev_set_mtu(struct net_device *dev, int new_mtu)
6762{
6763 const struct net_device_ops *ops = dev->netdev_ops;
6764
6765 if (ops->ndo_change_mtu)
6766 return ops->ndo_change_mtu(dev, new_mtu);
6767
6768 dev->mtu = new_mtu;
6769 return 0;
6770}
6771
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006772/**
6773 * dev_set_mtu - Change maximum transfer unit
6774 * @dev: device
6775 * @new_mtu: new transfer unit
6776 *
6777 * Change the maximum transfer size of the network device.
6778 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006779int dev_set_mtu(struct net_device *dev, int new_mtu)
6780{
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006781 int err, orig_mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006782
6783 if (new_mtu == dev->mtu)
6784 return 0;
6785
Jarod Wilson61e84622016-10-07 22:04:33 -04006786 /* MTU must be positive, and in range */
6787 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
6788 net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
6789 dev->name, new_mtu, dev->min_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006790 return -EINVAL;
Jarod Wilson61e84622016-10-07 22:04:33 -04006791 }
6792
6793 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
6794 net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
Jakub Kicinskia0e65de2016-10-17 18:02:22 +01006795 dev->name, new_mtu, dev->max_mtu);
Jarod Wilson61e84622016-10-07 22:04:33 -04006796 return -EINVAL;
6797 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006798
6799 if (!netif_device_present(dev))
6800 return -ENODEV;
6801
Veaceslav Falico1d486bf2014-01-16 00:02:18 +01006802 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6803 err = notifier_to_errno(err);
6804 if (err)
6805 return err;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006806
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006807 orig_mtu = dev->mtu;
6808 err = __dev_set_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006809
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006810 if (!err) {
6811 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6812 err = notifier_to_errno(err);
6813 if (err) {
6814 /* setting mtu back and notifying everyone again,
6815 * so that they have a chance to revert changes.
6816 */
6817 __dev_set_mtu(dev, orig_mtu);
6818 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6819 }
6820 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006821 return err;
6822}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006823EXPORT_SYMBOL(dev_set_mtu);
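
/* Usage sketch ("example_dev" and the jumbo value are hypothetical):
 * the caller holds RTNL and lets the notifier chain above veto or
 * revert the change.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(example_dev, 9000);
 *	rtnl_unlock();
 */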
Linus Torvalds1da177e2005-04-16 15:20:36 -07006824
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006825/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006826 * dev_set_group - Change group this device belongs to
6827 * @dev: device
6828 * @new_group: group this device should belong to
6829 */
6830void dev_set_group(struct net_device *dev, int new_group)
6831{
6832 dev->group = new_group;
6833}
6834EXPORT_SYMBOL(dev_set_group);
6835
6836/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006837 * dev_set_mac_address - Change Media Access Control Address
6838 * @dev: device
6839 * @sa: new address
6840 *
6841 * Change the hardware (MAC) address of the device
6842 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006843int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6844{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006845 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006846 int err;
6847
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006848 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006849 return -EOPNOTSUPP;
6850 if (sa->sa_family != dev->type)
6851 return -EINVAL;
6852 if (!netif_device_present(dev))
6853 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006854 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00006855 if (err)
6856 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00006857 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00006858 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006859 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00006860 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006861}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006862EXPORT_SYMBOL(dev_set_mac_address);
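
/* Usage sketch for a hypothetical Ethernet device: the new address
 * travels in a struct sockaddr whose sa_family must match dev->type.
 *
 *	struct sockaddr sa;
 *	u8 new_mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	sa.sa_family = example_dev->type;
 *	memcpy(sa.sa_data, new_mac, ETH_ALEN);
 *	err = dev_set_mac_address(example_dev, &sa);
 */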
Linus Torvalds1da177e2005-04-16 15:20:36 -07006863
Jiri Pirko4bf84c32012-12-27 23:49:37 +00006864/**
6865 * dev_change_carrier - Change device carrier
6866 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00006867 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00006868 *
6869 * Change device carrier
6870 */
6871int dev_change_carrier(struct net_device *dev, bool new_carrier)
6872{
6873 const struct net_device_ops *ops = dev->netdev_ops;
6874
6875 if (!ops->ndo_change_carrier)
6876 return -EOPNOTSUPP;
6877 if (!netif_device_present(dev))
6878 return -ENODEV;
6879 return ops->ndo_change_carrier(dev, new_carrier);
6880}
6881EXPORT_SYMBOL(dev_change_carrier);
6882
Linus Torvalds1da177e2005-04-16 15:20:36 -07006883/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02006884 * dev_get_phys_port_id - Get device physical port ID
6885 * @dev: device
6886 * @ppid: port ID
6887 *
6888 * Get device physical port ID
6889 */
6890int dev_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01006891 struct netdev_phys_item_id *ppid)
Jiri Pirko66b52b02013-07-29 18:16:49 +02006892{
6893 const struct net_device_ops *ops = dev->netdev_ops;
6894
6895 if (!ops->ndo_get_phys_port_id)
6896 return -EOPNOTSUPP;
6897 return ops->ndo_get_phys_port_id(dev, ppid);
6898}
6899EXPORT_SYMBOL(dev_get_phys_port_id);
6900
6901/**
David Aherndb24a902015-03-17 20:23:15 -06006902 * dev_get_phys_port_name - Get device physical port name
6903 * @dev: device
6904 * @name: port name
Luis de Bethencourted49e652016-03-21 16:31:14 +00006905 * @len: limit of bytes to copy to name
David Aherndb24a902015-03-17 20:23:15 -06006906 *
6907 * Get device physical port name
6908 */
6909int dev_get_phys_port_name(struct net_device *dev,
6910 char *name, size_t len)
6911{
6912 const struct net_device_ops *ops = dev->netdev_ops;
6913
6914 if (!ops->ndo_get_phys_port_name)
6915 return -EOPNOTSUPP;
6916 return ops->ndo_get_phys_port_name(dev, name, len);
6917}
6918EXPORT_SYMBOL(dev_get_phys_port_name);
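
/* Usage sketch: callers pass a buffer and its size; -EOPNOTSUPP simply
 * means the driver exposes no physical port naming.
 *
 *	char name[IFNAMSIZ];
 *
 *	if (!dev_get_phys_port_name(example_dev, name, sizeof(name)))
 *		pr_info("%s maps to phys port %s\n",
 *			example_dev->name, name);
 */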
6919
6920/**
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07006921 * dev_change_proto_down - update protocol port state information
6922 * @dev: device
6923 * @proto_down: new value
6924 *
6925 * This info can be used by switch drivers to set the phys state of the
6926 * port.
6927 */
6928int dev_change_proto_down(struct net_device *dev, bool proto_down)
6929{
6930 const struct net_device_ops *ops = dev->netdev_ops;
6931
6932 if (!ops->ndo_change_proto_down)
6933 return -EOPNOTSUPP;
6934 if (!netif_device_present(dev))
6935 return -ENODEV;
6936 return ops->ndo_change_proto_down(dev, proto_down);
6937}
6938EXPORT_SYMBOL(dev_change_proto_down);
6939
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02006940bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op)
6941{
6942 struct netdev_xdp xdp;
6943
6944 memset(&xdp, 0, sizeof(xdp));
6945 xdp.command = XDP_QUERY_PROG;
6946
6947 /* Query must always succeed. */
6948 WARN_ON(xdp_op(dev, &xdp) < 0);
6949 return xdp.prog_attached;
6950}
6951
6952static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
6953 struct netlink_ext_ack *extack,
6954 struct bpf_prog *prog)
6955{
6956 struct netdev_xdp xdp;
6957
6958 memset(&xdp, 0, sizeof(xdp));
6959 xdp.command = XDP_SETUP_PROG;
6960 xdp.extack = extack;
6961 xdp.prog = prog;
6962
6963 return xdp_op(dev, &xdp);
6964}
6965
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07006966/**
Brenden Blancoa7862b42016-07-19 12:16:48 -07006967 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
6968 * @dev: device
Jakub Kicinskib5d60982017-05-01 15:53:43 -07006969 * @extack: netlink extended ack
Brenden Blancoa7862b42016-07-19 12:16:48 -07006970 * @fd: new program fd or negative value to clear
Daniel Borkmann85de8572016-11-28 23:16:54 +01006971 * @flags: xdp-related flags
Brenden Blancoa7862b42016-07-19 12:16:48 -07006972 *
6973 * Set or clear a bpf program for a device
6974 */
Jakub Kicinskiddf9f972017-04-30 21:46:46 -07006975int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
6976 int fd, u32 flags)
Brenden Blancoa7862b42016-07-19 12:16:48 -07006977{
6978 const struct net_device_ops *ops = dev->netdev_ops;
6979 struct bpf_prog *prog = NULL;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02006980 xdp_op_t xdp_op, xdp_chk;
Brenden Blancoa7862b42016-07-19 12:16:48 -07006981 int err;
6982
Daniel Borkmann85de8572016-11-28 23:16:54 +01006983 ASSERT_RTNL();
6984
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02006985 xdp_op = xdp_chk = ops->ndo_xdp;
Daniel Borkmann0489df92017-05-12 01:04:45 +02006986 if (!xdp_op && (flags & XDP_FLAGS_DRV_MODE))
6987 return -EOPNOTSUPP;
David S. Millerb5cdae32017-04-18 15:36:58 -04006988 if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
6989 xdp_op = generic_xdp_install;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02006990 if (xdp_op == xdp_chk)
6991 xdp_chk = generic_xdp_install;
David S. Millerb5cdae32017-04-18 15:36:58 -04006992
Brenden Blancoa7862b42016-07-19 12:16:48 -07006993 if (fd >= 0) {
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02006994 if (xdp_chk && __dev_xdp_attached(dev, xdp_chk))
6995 return -EEXIST;
6996 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
6997 __dev_xdp_attached(dev, xdp_op))
6998 return -EBUSY;
Daniel Borkmann85de8572016-11-28 23:16:54 +01006999
Brenden Blancoa7862b42016-07-19 12:16:48 -07007000 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
7001 if (IS_ERR(prog))
7002 return PTR_ERR(prog);
7003 }
7004
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02007005 err = dev_xdp_install(dev, xdp_op, extack, prog);
Brenden Blancoa7862b42016-07-19 12:16:48 -07007006 if (err < 0 && prog)
7007 bpf_prog_put(prog);
7008
7009 return err;
7010}
Brenden Blancoa7862b42016-07-19 12:16:48 -07007011
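/* Usage sketch ("prog_fd" is a hypothetical fd from the bpf() syscall):
 * attach a program in generic (skb) mode, later detach by passing a
 * negative fd.
 *
 *	rtnl_lock();
 *	err = dev_change_xdp_fd(example_dev, NULL, prog_fd,
 *				XDP_FLAGS_SKB_MODE);
 *	...
 *	if (!err)
 *		dev_change_xdp_fd(example_dev, NULL, -1, XDP_FLAGS_SKB_MODE);
 *	rtnl_unlock();
 */
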
7012/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007013 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07007014 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07007015 *
7016 * Returns a suitable unique value for a new device interface
7017 * number. The caller must hold the rtnl semaphore or the
7018 * dev_base_lock to be sure it remains unique.
7019 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07007020static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007021{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00007022 int ifindex = net->ifindex;
tchardingf4563a72017-02-09 17:56:07 +11007023
Linus Torvalds1da177e2005-04-16 15:20:36 -07007024 for (;;) {
7025 if (++ifindex <= 0)
7026 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007027 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00007028 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007029 }
7030}
7031
Linus Torvalds1da177e2005-04-16 15:20:36 -07007032/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08007033static LIST_HEAD(net_todo_list);
Cong Wang200b9162014-05-12 15:11:20 -07007034DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007035
Stephen Hemminger6f05f622007-03-08 20:46:03 -08007036static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007037{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007038 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007039 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007040}
7041
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007042static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007043{
Krishna Kumare93737b2009-12-08 22:26:02 +00007044 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07007045 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007046
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007047 BUG_ON(dev_boot_phase);
7048 ASSERT_RTNL();
7049
Krishna Kumare93737b2009-12-08 22:26:02 +00007050 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007051		/* Some devices call unregister without ever having
Krishna Kumare93737b2009-12-08 22:26:02 +00007052		 * registered, as initialization unwind. Remove those
7053		 * devices and proceed with the remaining ones.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007054 */
7055 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007056 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
7057 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007058
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007059 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00007060 list_del(&dev->unreg_list);
7061 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007062 }
Eric Dumazet449f4542011-05-19 12:24:16 +00007063 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007064 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00007065 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007066
Octavian Purdila44345722010-12-13 12:44:07 +00007067 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07007068 list_for_each_entry(dev, head, unreg_list)
7069 list_add_tail(&dev->close_list, &close_head);
David S. Miller99c4a262015-03-18 22:52:33 -04007070 dev_close_many(&close_head, true);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007071
Octavian Purdila44345722010-12-13 12:44:07 +00007072 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007073 /* And unlink it from device chain. */
7074 unlist_netdevice(dev);
7075
7076 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007077 }
Eric Dumazet41852492016-08-26 12:50:39 -07007078 flush_all_backlogs();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007079
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007080 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007081
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007082 list_for_each_entry(dev, head, unreg_list) {
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007083 struct sk_buff *skb = NULL;
7084
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007085 /* Shutdown queueing discipline. */
7086 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007087
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007089		/* Notify protocols that we are about to destroy
tchardingeb13da12017-02-09 17:56:06 +11007090 * this device. They should clean all the things.
7091 */
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007092 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7093
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007094 if (!dev->rtnl_link_ops ||
7095 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Vlad Yasevich3d3ea5a2017-05-27 10:14:34 -04007096 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007097 GFP_KERNEL);
7098
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007099 /*
7100 * Flush the unicast and multicast chains
7101 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007102 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00007103 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007104
7105 if (dev->netdev_ops->ndo_uninit)
7106 dev->netdev_ops->ndo_uninit(dev);
7107
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007108 if (skb)
7109 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
Roopa Prabhu56bfa7e2014-05-01 11:40:30 -07007110
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007111 /* Notifier chain MUST detach us all upper devices. */
7112 WARN_ON(netdev_has_any_upper_dev(dev));
David Ahern0f524a82016-10-17 19:15:52 -07007113 WARN_ON(netdev_has_any_lower_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007114
7115 /* Remove entries from kobject tree */
7116 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00007117#ifdef CONFIG_XPS
7118 /* Remove XPS queueing entries */
7119 netif_reset_xps_queues_gt(dev, 0);
7120#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007121 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007122
Eric W. Biederman850a5452011-10-13 22:25:23 +00007123 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007124
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00007125 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007126 dev_put(dev);
7127}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007128
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007129static void rollback_registered(struct net_device *dev)
7130{
7131 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007132
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007133 list_add(&dev->unreg_list, &single);
7134 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00007135 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007136}
7137
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007138static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
7139 struct net_device *upper, netdev_features_t features)
7140{
7141 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7142 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007143 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007144
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007145 for_each_netdev_feature(&upper_disables, feature_bit) {
7146 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007147 if (!(upper->wanted_features & feature)
7148 && (features & feature)) {
7149 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
7150 &feature, upper->name);
7151 features &= ~feature;
7152 }
7153 }
7154
7155 return features;
7156}
7157
7158static void netdev_sync_lower_features(struct net_device *upper,
7159 struct net_device *lower, netdev_features_t features)
7160{
7161 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7162 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007163 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007164
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007165 for_each_netdev_feature(&upper_disables, feature_bit) {
7166 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007167 if (!(features & feature) && (lower->features & feature)) {
7168 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
7169 &feature, lower->name);
7170 lower->wanted_features &= ~feature;
7171 netdev_update_features(lower);
7172
7173 if (unlikely(lower->features & feature))
7174 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
7175 &feature, lower->name);
7176 }
7177 }
7178}
7179
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007180static netdev_features_t netdev_fix_features(struct net_device *dev,
7181 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07007182{
Michał Mirosław57422dc2011-01-22 12:14:12 +00007183 /* Fix illegal checksum combinations */
7184 if ((features & NETIF_F_HW_CSUM) &&
7185 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007186 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00007187 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
7188 }
7189
Herbert Xub63365a2008-10-23 01:11:29 -07007190 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00007191 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007192 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00007193 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07007194 }
7195
Pravin B Shelarec5f0612013-03-07 09:28:01 +00007196 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
7197 !(features & NETIF_F_IP_CSUM)) {
7198 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
7199 features &= ~NETIF_F_TSO;
7200 features &= ~NETIF_F_TSO_ECN;
7201 }
7202
7203 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
7204 !(features & NETIF_F_IPV6_CSUM)) {
7205 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
7206 features &= ~NETIF_F_TSO6;
7207 }
7208
Alexander Duyckb1dc4972016-05-02 09:38:24 -07007209 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
7210 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
7211 features &= ~NETIF_F_TSO_MANGLEID;
7212
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00007213 /* TSO ECN requires that TSO is present as well. */
7214 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
7215 features &= ~NETIF_F_TSO_ECN;
7216
Michał Mirosław212b5732011-02-15 16:59:16 +00007217 /* Software GSO depends on SG. */
7218 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007219 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00007220 features &= ~NETIF_F_GSO;
7221 }
7222
Michał Mirosławacd11302011-01-24 15:45:15 -08007223 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07007224 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00007225 /* maybe split UFO into V4 and V6? */
Tom Herbertc8cd0982015-12-14 11:19:44 -08007226 if (!(features & NETIF_F_HW_CSUM) &&
7227 ((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
7228 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007229 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08007230 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07007231 features &= ~NETIF_F_UFO;
7232 }
7233
7234 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007235 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08007236 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07007237 features &= ~NETIF_F_UFO;
7238 }
7239 }
7240
Alexander Duyck802ab552016-04-10 21:45:03 -04007241 /* GSO partial features require GSO partial be set */
7242 if ((features & dev->gso_partial_features) &&
7243 !(features & NETIF_F_GSO_PARTIAL)) {
7244 netdev_dbg(dev,
7245 "Dropping partially supported GSO features since no GSO partial.\n");
7246 features &= ~dev->gso_partial_features;
7247 }
7248
Herbert Xub63365a2008-10-23 01:11:29 -07007249 return features;
7250}
Herbert Xub63365a2008-10-23 01:11:29 -07007251
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007252int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00007253{
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007254 struct net_device *upper, *lower;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007255 netdev_features_t features;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007256 struct list_head *iter;
Jarod Wilsone7868a82015-11-03 23:09:32 -05007257 int err = -1;
Michał Mirosław5455c692011-02-15 16:59:17 +00007258
Michał Mirosław87267482011-04-12 09:56:38 +00007259 ASSERT_RTNL();
7260
Michał Mirosław5455c692011-02-15 16:59:17 +00007261 features = netdev_get_wanted_features(dev);
7262
7263 if (dev->netdev_ops->ndo_fix_features)
7264 features = dev->netdev_ops->ndo_fix_features(dev, features);
7265
7266 /* driver might be less strict about feature dependencies */
7267 features = netdev_fix_features(dev, features);
7268
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007269	/* some features can't be enabled if they're off on an upper device */
7270 netdev_for_each_upper_dev_rcu(dev, upper, iter)
7271 features = netdev_sync_upper_features(dev, upper, features);
7272
Michał Mirosław5455c692011-02-15 16:59:17 +00007273 if (dev->features == features)
Jarod Wilsone7868a82015-11-03 23:09:32 -05007274 goto sync_lower;
Michał Mirosław5455c692011-02-15 16:59:17 +00007275
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007276 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
7277 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00007278
7279 if (dev->netdev_ops->ndo_set_features)
7280 err = dev->netdev_ops->ndo_set_features(dev, features);
Nikolay Aleksandrov5f8dc332015-11-13 14:54:01 +01007281 else
7282 err = 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00007283
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007284 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00007285 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007286 "set_features() failed (%d); wanted %pNF, left %pNF\n",
7287 err, &features, &dev->features);
Nikolay Aleksandrov17b85d22015-11-17 15:49:06 +01007288 /* return non-0 since some features might have changed and
7289 * it's better to fire a spurious notification than miss it
7290 */
7291 return -1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007292 }
7293
Jarod Wilsone7868a82015-11-03 23:09:32 -05007294sync_lower:
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007295 /* some features must be disabled on lower devices when disabled
7296 * on an upper device (think: bonding master or bridge)
7297 */
7298 netdev_for_each_lower_dev(dev, lower, iter)
7299 netdev_sync_lower_features(dev, lower, features);
7300
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007301 if (!err)
7302 dev->features = features;
7303
Jarod Wilsone7868a82015-11-03 23:09:32 -05007304 return err < 0 ? 0 : 1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007305}
7306
Michał Mirosławafe12cc2011-05-07 03:22:17 +00007307/**
7308 * netdev_update_features - recalculate device features
7309 * @dev: the device to check
7310 *
7311 * Recalculate dev->features set and send notifications if it
7312 * has changed. Should be called after driver or hardware dependent
7313 * conditions might have changed that influence the features.
7314 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007315void netdev_update_features(struct net_device *dev)
7316{
7317 if (__netdev_update_features(dev))
7318 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00007319}
7320EXPORT_SYMBOL(netdev_update_features);
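
/* Usage sketch: this mirrors the pattern helpers such as
 * dev_disable_lro() use elsewhere in this file: clear a wanted bit,
 * then let the recalculation and notification above do the rest.
 *
 *	dev->wanted_features &= ~NETIF_F_LRO;
 *	netdev_update_features(dev);
 */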
7321
Linus Torvalds1da177e2005-04-16 15:20:36 -07007322/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00007323 * netdev_change_features - recalculate device features
7324 * @dev: the device to check
7325 *
7326 * Recalculate dev->features set and send notifications even
7327 * if they have not changed. Should be called instead of
7328 * netdev_update_features() if also dev->vlan_features might
7329 * have changed to allow the changes to be propagated to stacked
7330 * VLAN devices.
7331 */
7332void netdev_change_features(struct net_device *dev)
7333{
7334 __netdev_update_features(dev);
7335 netdev_features_change(dev);
7336}
7337EXPORT_SYMBOL(netdev_change_features);
7338
7339/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007340 * netif_stacked_transfer_operstate - transfer operstate
7341 * @rootdev: the root or lower level device to transfer state from
7342 * @dev: the device to transfer operstate to
7343 *
7344 * Transfer operational state from root to device. This is normally
7345 * called when a stacking relationship exists between the root
7346 *	device and the device (a leaf device).
7347 */
7348void netif_stacked_transfer_operstate(const struct net_device *rootdev,
7349 struct net_device *dev)
7350{
7351 if (rootdev->operstate == IF_OPER_DORMANT)
7352 netif_dormant_on(dev);
7353 else
7354 netif_dormant_off(dev);
7355
Zhang Shengju0575c862017-04-26 17:49:38 +08007356 if (netif_carrier_ok(rootdev))
7357 netif_carrier_on(dev);
7358 else
7359 netif_carrier_off(dev);
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007360}
7361EXPORT_SYMBOL(netif_stacked_transfer_operstate);
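
/* Usage sketch, with hypothetical device names: a VLAN-like stacked
 * driver would typically call this from its NETDEV_CHANGE handling so
 * the virtual device mirrors the real one.
 *
 *	netif_stacked_transfer_operstate(real_dev, virtual_dev);
 */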
7362
Michael Daltona953be52014-01-16 22:23:28 -08007363#ifdef CONFIG_SYSFS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007364static int netif_alloc_rx_queues(struct net_device *dev)
7365{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007366 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00007367 struct netdev_rx_queue *rx;
Pankaj Gupta10595902015-01-12 11:41:28 +05307368 size_t sz = count * sizeof(*rx);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007369
Tom Herbertbd25fa72010-10-18 18:00:16 +00007370 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007371
Michal Hockoda6bc572017-05-08 15:57:31 -07007372 rx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
7373 if (!rx)
7374 return -ENOMEM;
7375
Tom Herbertbd25fa72010-10-18 18:00:16 +00007376 dev->_rx = rx;
7377
Tom Herbertbd25fa72010-10-18 18:00:16 +00007378 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00007379 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007380 return 0;
7381}
Tom Herbertbf264142010-11-26 08:36:09 +00007382#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007383
Changli Gaoaa942102010-12-04 02:31:41 +00007384static void netdev_init_one_queue(struct net_device *dev,
7385 struct netdev_queue *queue, void *_unused)
7386{
7387 /* Initialize queue lock */
7388 spin_lock_init(&queue->_xmit_lock);
7389 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
7390 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00007391 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00007392 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00007393#ifdef CONFIG_BQL
7394 dql_init(&queue->dql, HZ);
7395#endif
Changli Gaoaa942102010-12-04 02:31:41 +00007396}
7397
Eric Dumazet60877a32013-06-20 01:15:51 -07007398static void netif_free_tx_queues(struct net_device *dev)
7399{
WANG Cong4cb28972014-06-02 15:55:22 -07007400 kvfree(dev->_tx);
Eric Dumazet60877a32013-06-20 01:15:51 -07007401}
7402
Tom Herberte6484932010-10-18 18:04:39 +00007403static int netif_alloc_netdev_queues(struct net_device *dev)
7404{
7405 unsigned int count = dev->num_tx_queues;
7406 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07007407 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00007408
Eric Dumazetd3397272015-07-06 17:13:26 +02007409 if (count < 1 || count > 0xffff)
7410 return -EINVAL;
Tom Herberte6484932010-10-18 18:04:39 +00007411
Michal Hockoda6bc572017-05-08 15:57:31 -07007412 tx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
7413 if (!tx)
7414 return -ENOMEM;
7415
Tom Herberte6484932010-10-18 18:04:39 +00007416 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00007417
Tom Herberte6484932010-10-18 18:04:39 +00007418 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
7419 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00007420
7421 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00007422}
7423
Denys Vlasenkoa2029242015-05-11 21:17:53 +02007424void netif_tx_stop_all_queues(struct net_device *dev)
7425{
7426 unsigned int i;
7427
7428 for (i = 0; i < dev->num_tx_queues; i++) {
7429 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
tchardingf4563a72017-02-09 17:56:07 +11007430
Denys Vlasenkoa2029242015-05-11 21:17:53 +02007431 netif_tx_stop_queue(txq);
7432 }
7433}
7434EXPORT_SYMBOL(netif_tx_stop_all_queues);
7435
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007436/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007437 * register_netdevice - register a network device
7438 * @dev: device to register
7439 *
7440 * Take a completed network device structure and add it to the kernel
7441 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7442 * chain. 0 is returned on success. A negative errno code is returned
7443 * on a failure to set up the device, or if the name is a duplicate.
7444 *
7445 * Callers must hold the rtnl semaphore. You may want
7446 * register_netdev() instead of this.
7447 *
7448 * BUGS:
7449 * The locking appears insufficient to guarantee two parallel registers
7450 * will not get the same name.
7451 */
7452
7453int register_netdevice(struct net_device *dev)
7454{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007455 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007456 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007457
7458 BUG_ON(dev_boot_phase);
7459 ASSERT_RTNL();
7460
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007461 might_sleep();
7462
Linus Torvalds1da177e2005-04-16 15:20:36 -07007463	/* When net_devices are persistent, this will be fatal. */
7464 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007465 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007466
David S. Millerf1f28aa2008-07-15 00:08:33 -07007467 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07007468 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007469
Gao feng828de4f2012-09-13 20:58:27 +00007470 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00007471 if (ret < 0)
7472 goto out;
7473
Linus Torvalds1da177e2005-04-16 15:20:36 -07007474 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007475 if (dev->netdev_ops->ndo_init) {
7476 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007477 if (ret) {
7478 if (ret > 0)
7479 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08007480 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007481 }
7482 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007483
Patrick McHardyf6469682013-04-19 02:04:27 +00007484 if (((dev->hw_features | dev->features) &
7485 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00007486 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
7487 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
7488 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
7489 ret = -EINVAL;
7490 goto err_uninit;
7491 }
7492
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00007493 ret = -EBUSY;
7494 if (!dev->ifindex)
7495 dev->ifindex = dev_new_index(net);
7496 else if (__dev_get_by_index(net, dev->ifindex))
7497 goto err_uninit;
7498
Michał Mirosław5455c692011-02-15 16:59:17 +00007499 /* Transfer changeable features to wanted_features and enable
7500 * software offloads (GSO and GRO).
7501 */
7502 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00007503 dev->features |= NETIF_F_SOFT_FEATURES;
7504 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007505
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007506 if (!(dev->flags & IFF_LOOPBACK))
Michał Mirosław34324dc2011-11-15 15:29:55 +00007507 dev->hw_features |= NETIF_F_NOCACHE_COPY;
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007508
Alexander Duyck7f348a62016-04-20 16:51:00 -04007509 /* If IPv4 TCP segmentation offload is supported we should also
7510 * allow the device to enable segmenting the frame with the option
7511 * of ignoring a static IP ID value. This doesn't enable the
7512 * feature itself but allows the user to enable it later.
7513 */
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007514 if (dev->hw_features & NETIF_F_TSO)
7515 dev->hw_features |= NETIF_F_TSO_MANGLEID;
Alexander Duyck7f348a62016-04-20 16:51:00 -04007516 if (dev->vlan_features & NETIF_F_TSO)
7517 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
7518 if (dev->mpls_features & NETIF_F_TSO)
7519 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
7520 if (dev->hw_enc_features & NETIF_F_TSO)
7521 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07007522
Michał Mirosław1180e7d2011-07-14 14:41:11 -07007523 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00007524 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07007525 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00007526
Pravin B Shelaree579672013-03-07 09:28:08 +00007527 /* Make NETIF_F_SG inheritable to tunnel devices.
7528 */
Alexander Duyck802ab552016-04-10 21:45:03 -04007529 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
Pravin B Shelaree579672013-03-07 09:28:08 +00007530
Simon Horman0d89d202013-05-23 21:02:52 +00007531 /* Make NETIF_F_SG inheritable to MPLS.
7532 */
7533 dev->mpls_features |= NETIF_F_SG;
7534
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00007535 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
7536 ret = notifier_to_errno(ret);
7537 if (ret)
7538 goto err_uninit;
7539
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007540 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007541 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007542 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007543 dev->reg_state = NETREG_REGISTERED;
7544
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007545 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00007546
Linus Torvalds1da177e2005-04-16 15:20:36 -07007547 /*
7548 * Default initial state at registry is that the
7549 * device is present.
7550 */
7551
7552 set_bit(__LINK_STATE_PRESENT, &dev->state);
7553
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01007554 linkwatch_init_dev(dev);
7555
Linus Torvalds1da177e2005-04-16 15:20:36 -07007556 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007557 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007558 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04007559 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007560
Jiri Pirko948b3372013-01-08 01:38:25 +00007561 /* If the device has a permanent device address, the driver should
7562 * set dev_addr, and addr_assign_type should be set to
7563 * NET_ADDR_PERM (the default value).
7564 */
7565 if (dev->addr_assign_type == NET_ADDR_PERM)
7566 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7567
Linus Torvalds1da177e2005-04-16 15:20:36 -07007568 /* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007569 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07007570 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007571 if (ret) {
7572 rollback_registered(dev);
7573 dev->reg_state = NETREG_UNREGISTERED;
7574 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007575 /*
7576 * Prevent userspace races by waiting until the network
7577 * device is fully setup before sending notifications.
7578 */
Patrick McHardya2835762010-02-26 06:34:51 +00007579 if (!dev->rtnl_link_ops ||
7580 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007581 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007582
7583out:
7584 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007585
7586err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007587 if (dev->netdev_ops->ndo_uninit)
7588 dev->netdev_ops->ndo_uninit(dev);
David S. Millercf124db2017-05-08 12:52:56 -04007589 if (dev->priv_destructor)
7590 dev->priv_destructor(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007591 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007592}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007593EXPORT_SYMBOL(register_netdevice);
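
/*
 * Illustrative sketch (not part of dev.c): register_netdevice() must be
 * called with the RTNL held, which is why rtnl_link ->newlink()
 * implementations can call it directly.  "my_newlink" and its body are
 * hypothetical; only the calling convention comes from the code above.
 */
static int my_newlink(struct net *src_net, struct net_device *dev,
                      struct nlattr *tb[], struct nlattr *data[])
{
        int err;

        /* ->newlink() runs under rtnl_lock(), as register_netdevice()
         * requires.
         */
        err = register_netdevice(dev);
        if (err)
                return err;     /* on failure the caller still owns dev */

        netif_carrier_off(dev); /* typical post-registration fixup */
        return 0;
}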
Linus Torvalds1da177e2005-04-16 15:20:36 -07007594
7595/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007596 * init_dummy_netdev - init a dummy network device for NAPI
7597 * @dev: device to init
7598 *
7599 * This takes a network device structure and initializes the minimum
7600 * number of fields so it can be used to schedule NAPI polls without
7601 * registering a full blown interface. This is to be used by drivers
7602 * that need to tie several hardware interfaces to a single NAPI
7603 * poll scheduler due to HW limitations.
7604 */
7605int init_dummy_netdev(struct net_device *dev)
7606{
7607 /* Clear everything. Note we don't initialize spinlocks
7608 * as they aren't supposed to be taken by any of the
7609 * NAPI code and this dummy netdev is supposed to be
7610 * only ever used for NAPI polls
7611 */
7612 memset(dev, 0, sizeof(struct net_device));
7613
7614 /* make sure we BUG if trying to hit standard
7615 * register/unregister code path
7616 */
7617 dev->reg_state = NETREG_DUMMY;
7618
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007619 /* NAPI wants this */
7620 INIT_LIST_HEAD(&dev->napi_list);
7621
7622 /* a dummy interface is started by default */
7623 set_bit(__LINK_STATE_PRESENT, &dev->state);
7624 set_bit(__LINK_STATE_START, &dev->state);
7625
Eric Dumazet29b44332010-10-11 10:22:12 +00007626 /* Note: We don't allocate pcpu_refcnt for dummy devices,
7627 * because users of this 'device' don't need to change
7628 * its refcount.
7629 */
7630
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007631 return 0;
7632}
7633EXPORT_SYMBOL_GPL(init_dummy_netdev);
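
/*
 * Illustrative sketch (not part of dev.c): a driver that multiplexes
 * several hardware queues behind one PCI function can host its NAPI
 * contexts on a dummy netdev.  "struct my_adapter" and "my_poll" are
 * hypothetical driver names.
 */
struct my_adapter {
        struct net_device napi_dev;     /* dummy, never registered */
        struct napi_struct napi;
};

static void my_adapter_napi_init(struct my_adapter *ad,
                                 int (*my_poll)(struct napi_struct *, int))
{
        init_dummy_netdev(&ad->napi_dev);
        netif_napi_add(&ad->napi_dev, &ad->napi, my_poll, NAPI_POLL_WEIGHT);
        napi_enable(&ad->napi);
}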
7634
7635
7636/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007637 * register_netdev - register a network device
7638 * @dev: device to register
7639 *
7640 * Take a completed network device structure and add it to the kernel
7641 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7642 * chain. 0 is returned on success. A negative errno code is returned
7643 * on a failure to set up the device, or if the name is a duplicate.
7644 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07007645 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07007646 * and expands the device name if you passed a format string to
7647 * alloc_netdev.
7648 */
7649int register_netdev(struct net_device *dev)
7650{
7651 int err;
7652
7653 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007654 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007655 rtnl_unlock();
7656 return err;
7657}
7658EXPORT_SYMBOL(register_netdev);
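
/*
 * Illustrative sketch (not part of dev.c): the classic probe-time
 * pattern around register_netdev().  alloc_etherdev(), free_netdev()
 * and ether_setup() are real kernel APIs; "my_probe_priv" is a
 * hypothetical private struct.
 */
struct my_probe_priv { int id; };       /* hypothetical driver state */

static struct net_device *my_probe_netdev(void)
{
        struct net_device *dev;

        dev = alloc_etherdev(sizeof(struct my_probe_priv));
        if (!dev)
                return NULL;

        /* ... set dev->netdev_ops, MAC address, features here ... */

        if (register_netdev(dev)) {     /* takes and releases the RTNL */
                free_netdev(dev);       /* error unwind */
                return NULL;
        }
        return dev;
}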
7659
Eric Dumazet29b44332010-10-11 10:22:12 +00007660int netdev_refcnt_read(const struct net_device *dev)
7661{
7662 int i, refcnt = 0;
7663
7664 for_each_possible_cpu(i)
7665 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
7666 return refcnt;
7667}
7668EXPORT_SYMBOL(netdev_refcnt_read);
7669
Ben Hutchings2c530402012-07-10 10:55:09 +00007670/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007671 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00007672 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07007673 *
7674 * This is called when unregistering network devices.
7675 *
7676 * Any protocol or device that holds a reference should register
7677 * for netdevice notification, and cleanup and put back the
7678 * reference if they receive an UNREGISTER event.
7679 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007680 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007681 */
7682static void netdev_wait_allrefs(struct net_device *dev)
7683{
7684 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00007685 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007686
Eric Dumazete014deb2009-11-17 05:59:21 +00007687 linkwatch_forget_dev(dev);
7688
Linus Torvalds1da177e2005-04-16 15:20:36 -07007689 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00007690 refcnt = netdev_refcnt_read(dev);
7691
7692 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007693 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08007694 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007695
7696 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007697 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007698
Eric Dumazet748e2d92012-08-22 21:50:59 +00007699 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007700 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00007701 rtnl_lock();
7702
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007703 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007704 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
7705 &dev->state)) {
7706 /* We must not have linkwatch events
7707 * pending on unregister. If this
7708 * happens, we simply run the queue
7709 * unscheduled, resulting in a noop
7710 * for this device.
7711 */
7712 linkwatch_run_queue();
7713 }
7714
Stephen Hemminger6756ae42006-03-20 22:23:58 -08007715 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007716
7717 rebroadcast_time = jiffies;
7718 }
7719
7720 msleep(250);
7721
Eric Dumazet29b44332010-10-11 10:22:12 +00007722 refcnt = netdev_refcnt_read(dev);
7723
Linus Torvalds1da177e2005-04-16 15:20:36 -07007724 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007725 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
7726 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007727 warning_time = jiffies;
7728 }
7729 }
7730}
7731
7732/* The sequence is:
7733 *
7734 * rtnl_lock();
7735 * ...
7736 * register_netdevice(x1);
7737 * register_netdevice(x2);
7738 * ...
7739 * unregister_netdevice(y1);
7740 * unregister_netdevice(y2);
7741 * ...
7742 * rtnl_unlock();
7743 * free_netdev(y1);
7744 * free_netdev(y2);
7745 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07007746 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07007747 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007748 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07007749 * without deadlocking with linkwatch via keventd.
7750 * 2) Since we run with the RTNL semaphore not held, we can sleep
7751 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07007752 *
7753 * We must not return until all unregister events added during
7754 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007755 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007756void netdev_run_todo(void)
7757{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007758 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007759
Linus Torvalds1da177e2005-04-16 15:20:36 -07007760 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007761 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07007762
7763 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007764
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007765
7766 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00007767 if (!list_empty(&list))
7768 rcu_barrier();
7769
Linus Torvalds1da177e2005-04-16 15:20:36 -07007770 while (!list_empty(&list)) {
7771 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00007772 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007773 list_del(&dev->todo_list);
7774
Eric Dumazet748e2d92012-08-22 21:50:59 +00007775 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007776 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00007777 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007778
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007779 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007780 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007781 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007782 dump_stack();
7783 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007784 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007785
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007786 dev->reg_state = NETREG_UNREGISTERED;
7787
7788 netdev_wait_allrefs(dev);
7789
7790 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00007791 BUG_ON(netdev_refcnt_read(dev));
Salam Noureddine7866a622015-01-27 11:35:48 -08007792 BUG_ON(!list_empty(&dev->ptype_all));
7793 BUG_ON(!list_empty(&dev->ptype_specific));
Eric Dumazet33d480c2011-08-11 19:30:52 +00007794 WARN_ON(rcu_access_pointer(dev->ip_ptr));
7795 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07007796 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007797
David S. Millercf124db2017-05-08 12:52:56 -04007798 if (dev->priv_destructor)
7799 dev->priv_destructor(dev);
7800 if (dev->needs_free_netdev)
7801 free_netdev(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007802
Eric W. Biederman50624c92013-09-23 21:19:49 -07007803 /* Report a network device has been unregistered */
7804 rtnl_lock();
7805 dev_net(dev)->dev_unreg_count--;
7806 __rtnl_unlock();
7807 wake_up(&netdev_unregistering_wq);
7808
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007809 /* Free network device */
7810 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007811 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007812}
7813
Jarod Wilson92566452016-02-01 18:51:04 -05007814/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
7815 * all the same fields in the same order as net_device_stats, with only
7816 * the type differing, but rtnl_link_stats64 may have additional fields
7817 * at the end for newer counters.
Ben Hutchings3cfde792010-07-09 09:11:52 +00007818 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007819void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7820 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00007821{
7822#if BITS_PER_LONG == 64
Jarod Wilson92566452016-02-01 18:51:04 -05007823 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007824 memcpy(stats64, netdev_stats, sizeof(*stats64));
Jarod Wilson92566452016-02-01 18:51:04 -05007825 /* zero out counters that only exist in rtnl_link_stats64 */
7826 memset((char *)stats64 + sizeof(*netdev_stats), 0,
7827 sizeof(*stats64) - sizeof(*netdev_stats));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007828#else
Jarod Wilson92566452016-02-01 18:51:04 -05007829 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
Ben Hutchings3cfde792010-07-09 09:11:52 +00007830 const unsigned long *src = (const unsigned long *)netdev_stats;
7831 u64 *dst = (u64 *)stats64;
7832
Jarod Wilson92566452016-02-01 18:51:04 -05007833 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007834 for (i = 0; i < n; i++)
7835 dst[i] = src[i];
Jarod Wilson92566452016-02-01 18:51:04 -05007836 /* zero out counters that only exist in rtnl_link_stats64 */
7837 memset((char *)stats64 + n * sizeof(u64), 0,
7838 sizeof(*stats64) - n * sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007839#endif
7840}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007841EXPORT_SYMBOL(netdev_stats_to_stats64);
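
/*
 * Illustrative sketch (not part of dev.c): a driver that only maintains
 * the legacy dev->stats counters can still implement ndo_get_stats64()
 * by converting them, then overlaying any 64-bit counters it does keep.
 * "my_stats_priv" and its field are hypothetical.
 */
struct my_stats_priv { u64 rx_bytes64; };       /* hypothetical */

static void my_get_stats64(struct net_device *dev,
                           struct rtnl_link_stats64 *storage)
{
        struct my_stats_priv *priv = netdev_priv(dev);

        netdev_stats_to_stats64(storage, &dev->stats);
        storage->rx_bytes = priv->rx_bytes64;   /* driver-kept 64-bit counter */
}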
Ben Hutchings3cfde792010-07-09 09:11:52 +00007842
Eric Dumazetd83345a2009-11-16 03:36:51 +00007843/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007844 * dev_get_stats - get network device statistics
7845 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07007846 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007847 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00007848 * Get network statistics from device. Return @storage.
7849 * The device driver may provide its own method by setting
7850 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
7851 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007852 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00007853struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7854 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00007855{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007856 const struct net_device_ops *ops = dev->netdev_ops;
7857
Eric Dumazet28172732010-07-07 14:58:56 -07007858 if (ops->ndo_get_stats64) {
7859 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007860 ops->ndo_get_stats64(dev, storage);
7861 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00007862 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007863 } else {
7864 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07007865 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007866 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet015f0682014-03-27 08:45:56 -07007867 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
Jarod Wilson6e7333d2016-02-01 18:51:05 -05007868 storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
Eric Dumazet28172732010-07-07 14:58:56 -07007869 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07007870}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007871EXPORT_SYMBOL(dev_get_stats);
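
/*
 * Illustrative sketch (not part of dev.c): the usual caller pattern for
 * dev_get_stats() — hand it scratch storage and use the returned
 * pointer, as net-sysfs and rtnetlink do.  "my_dev_rx_packets" is a
 * hypothetical helper.
 */
static u64 my_dev_rx_packets(struct net_device *dev)
{
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

        return stats->rx_packets;
}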
Rusty Russellc45d2862007-03-28 14:29:08 -07007872
Eric Dumazet24824a02010-10-02 06:11:55 +00007873struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07007874{
Eric Dumazet24824a02010-10-02 06:11:55 +00007875 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07007876
Eric Dumazet24824a02010-10-02 06:11:55 +00007877#ifdef CONFIG_NET_CLS_ACT
7878 if (queue)
7879 return queue;
7880 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7881 if (!queue)
7882 return NULL;
7883 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08007884 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
Eric Dumazet24824a02010-10-02 06:11:55 +00007885 queue->qdisc_sleeping = &noop_qdisc;
7886 rcu_assign_pointer(dev->ingress_queue, queue);
7887#endif
7888 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07007889}
7890
Eric Dumazet2c60db02012-09-16 09:17:26 +00007891static const struct ethtool_ops default_ethtool_ops;
7892
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00007893void netdev_set_default_ethtool_ops(struct net_device *dev,
7894 const struct ethtool_ops *ops)
7895{
7896 if (dev->ethtool_ops == &default_ethtool_ops)
7897 dev->ethtool_ops = ops;
7898}
7899EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
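
/*
 * Illustrative sketch (not part of dev.c): a shared MAC-core library
 * can install fallback ethtool_ops without clobbering ops a specific
 * driver set earlier.  "my_lib_*" names are hypothetical;
 * ethtool_op_get_link() is a real kernel helper.
 */
static const struct ethtool_ops my_lib_ethtool_ops = {
        .get_link       = ethtool_op_get_link,
};

static void my_lib_attach(struct net_device *dev)
{
        /* No-op if the driver already assigned its own ethtool_ops. */
        netdev_set_default_ethtool_ops(dev, &my_lib_ethtool_ops);
}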
7900
Eric Dumazet74d332c2013-10-30 13:10:44 -07007901void netdev_freemem(struct net_device *dev)
7902{
7903 char *addr = (char *)dev - dev->padded;
7904
WANG Cong4cb28972014-06-02 15:55:22 -07007905 kvfree(addr);
Eric Dumazet74d332c2013-10-30 13:10:44 -07007906}
7907
Linus Torvalds1da177e2005-04-16 15:20:36 -07007908/**
tcharding722c9a02017-02-09 17:56:04 +11007909 * alloc_netdev_mqs - allocate network device
7910 * @sizeof_priv: size of private data to allocate space for
7911 * @name: device name format string
7912 * @name_assign_type: origin of device name
7913 * @setup: callback to initialize device
7914 * @txqs: the number of TX subqueues to allocate
7915 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07007916 *
tcharding722c9a02017-02-09 17:56:04 +11007917 * Allocates a struct net_device with private data area for driver use
7918 * and performs basic initialization. Also allocates subqueue structs
7919 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007920 */
Tom Herbert36909ea2011-01-09 19:36:31 +00007921struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02007922 unsigned char name_assign_type,
Tom Herbert36909ea2011-01-09 19:36:31 +00007923 void (*setup)(struct net_device *),
7924 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007925{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007926 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07007927 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007928 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007929
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07007930 BUG_ON(strlen(name) >= sizeof(dev->name));
7931
Tom Herbert36909ea2011-01-09 19:36:31 +00007932 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007933 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00007934 return NULL;
7935 }
7936
Michael Daltona953be52014-01-16 22:23:28 -08007937#ifdef CONFIG_SYSFS
Tom Herbert36909ea2011-01-09 19:36:31 +00007938 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007939 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00007940 return NULL;
7941 }
7942#endif
7943
David S. Millerfd2ea0a2008-07-17 01:56:23 -07007944 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07007945 if (sizeof_priv) {
7946 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007947 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07007948 alloc_size += sizeof_priv;
7949 }
7950 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007951 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007952
Michal Hockoda6bc572017-05-08 15:57:31 -07007953 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_REPEAT);
Joe Perches62b59422013-02-04 16:48:16 +00007954 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007955 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007956
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007957 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007958 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007959
Eric Dumazet29b44332010-10-11 10:22:12 +00007960 dev->pcpu_refcnt = alloc_percpu(int);
7961 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07007962 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007963
Linus Torvalds1da177e2005-04-16 15:20:36 -07007964 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00007965 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007966
Jiri Pirko22bedad32010-04-01 21:22:57 +00007967 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007968 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00007969
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09007970 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007971
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07007972 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00007973 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007974
Herbert Xud565b0a2008-12-15 23:38:52 -08007975 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00007976 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07007977 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00007978 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007979 INIT_LIST_HEAD(&dev->adj_list.upper);
7980 INIT_LIST_HEAD(&dev->adj_list.lower);
Salam Noureddine7866a622015-01-27 11:35:48 -08007981 INIT_LIST_HEAD(&dev->ptype_all);
7982 INIT_LIST_HEAD(&dev->ptype_specific);
Jiri Kosina59cc1f62016-08-10 11:05:15 +02007983#ifdef CONFIG_NET_SCHED
7984 hash_init(dev->qdisc_hash);
7985#endif
Eric Dumazet02875872014-10-05 18:38:35 -07007986 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007987 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007988
Phil Suttera8131042016-02-17 15:37:43 +01007989 if (!dev->tx_queue_len) {
Phil Sutterf84bb1e2015-08-27 21:21:36 +02007990 dev->priv_flags |= IFF_NO_QUEUE;
Jesper Dangaard Brouer11597082016-11-03 14:56:06 +01007991 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
Phil Suttera8131042016-02-17 15:37:43 +01007992 }
Phil Sutter906470c2015-08-18 10:30:48 +02007993
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007994 dev->num_tx_queues = txqs;
7995 dev->real_num_tx_queues = txqs;
7996 if (netif_alloc_netdev_queues(dev))
7997 goto free_all;
7998
Michael Daltona953be52014-01-16 22:23:28 -08007999#ifdef CONFIG_SYSFS
David S. Miller8d3bdbd2011-02-08 15:02:50 -08008000 dev->num_rx_queues = rxqs;
8001 dev->real_num_rx_queues = rxqs;
8002 if (netif_alloc_rx_queues(dev))
8003 goto free_all;
8004#endif
8005
Linus Torvalds1da177e2005-04-16 15:20:36 -07008006 strcpy(dev->name, name);
Tom Gundersenc835a672014-07-14 16:37:24 +02008007 dev->name_assign_type = name_assign_type;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00008008 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00008009 if (!dev->ethtool_ops)
8010 dev->ethtool_ops = &default_ethtool_ops;
Pablo Neirae687ad62015-05-13 18:19:38 +02008011
8012 nf_hook_ingress_init(dev);
8013
Linus Torvalds1da177e2005-04-16 15:20:36 -07008014 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00008015
David S. Miller8d3bdbd2011-02-08 15:02:50 -08008016free_all:
8017 free_netdev(dev);
8018 return NULL;
8019
Eric Dumazet29b44332010-10-11 10:22:12 +00008020free_pcpu:
8021 free_percpu(dev->pcpu_refcnt);
Eric Dumazet74d332c2013-10-30 13:10:44 -07008022free_dev:
8023 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00008024 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008025}
Tom Herbert36909ea2011-01-09 19:36:31 +00008026EXPORT_SYMBOL(alloc_netdev_mqs);
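
/*
 * Illustrative sketch (not part of dev.c): allocating a 4-TX/4-RX-queue
 * Ethernet-style device.  ether_setup() and NET_NAME_ENUM are real
 * kernel symbols; "my_mq_priv" is hypothetical.  The "eth%d" format is
 * expanded later, when the device is registered.
 */
struct my_mq_priv { int id; };  /* hypothetical driver state */

static struct net_device *my_alloc_mq(void)
{
        return alloc_netdev_mqs(sizeof(struct my_mq_priv), "eth%d",
                                NET_NAME_ENUM, ether_setup, 4, 4);
}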
Linus Torvalds1da177e2005-04-16 15:20:36 -07008027
8028/**
tcharding722c9a02017-02-09 17:56:04 +11008029 * free_netdev - free network device
8030 * @dev: device
Linus Torvalds1da177e2005-04-16 15:20:36 -07008031 *
tcharding722c9a02017-02-09 17:56:04 +11008032 * This function does the last stage of destroying an allocated device
8033 * interface. The reference to the device object is released. If this
8034 * is the last reference then it will be freed. Must be called in process
8035 * context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008036 */
8037void free_netdev(struct net_device *dev)
8038{
Herbert Xud565b0a2008-12-15 23:38:52 -08008039 struct napi_struct *p, *n;
David S. Millerb5cdae32017-04-18 15:36:58 -04008040 struct bpf_prog *prog;
Herbert Xud565b0a2008-12-15 23:38:52 -08008041
Eric Dumazet93d05d42015-11-18 06:31:03 -08008042 might_sleep();
Eric Dumazet60877a32013-06-20 01:15:51 -07008043 netif_free_tx_queues(dev);
Michael Daltona953be52014-01-16 22:23:28 -08008044#ifdef CONFIG_SYSFS
Pankaj Gupta10595902015-01-12 11:41:28 +05308045 kvfree(dev->_rx);
Tom Herbertfe822242010-11-09 10:47:38 +00008046#endif
David S. Millere8a04642008-07-17 00:34:19 -07008047
Eric Dumazet33d480c2011-08-11 19:30:52 +00008048 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00008049
Jiri Pirkof001fde2009-05-05 02:48:28 +00008050 /* Flush device addresses */
8051 dev_addr_flush(dev);
8052
Herbert Xud565b0a2008-12-15 23:38:52 -08008053 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
8054 netif_napi_del(p);
8055
Eric Dumazet29b44332010-10-11 10:22:12 +00008056 free_percpu(dev->pcpu_refcnt);
8057 dev->pcpu_refcnt = NULL;
8058
David S. Millerb5cdae32017-04-18 15:36:58 -04008059 prog = rcu_dereference_protected(dev->xdp_prog, 1);
8060 if (prog) {
8061 bpf_prog_put(prog);
8062 static_key_slow_dec(&generic_xdp_needed);
8063 }
8064
Stephen Hemminger3041a062006-05-26 13:25:24 -07008065 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008066 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07008067 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008068 return;
8069 }
8070
8071 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
8072 dev->reg_state = NETREG_RELEASED;
8073
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07008074 /* will free via device release */
8075 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008076}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008077EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09008078
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008079/**
8080 * synchronize_net - Synchronize with packet receive processing
8081 *
8082 * Wait for packets currently being received to be done.
8083 * Does not block later packets from starting.
8084 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09008085void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008086{
8087 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00008088 if (rtnl_is_locked())
8089 synchronize_rcu_expedited();
8090 else
8091 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008092}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008093EXPORT_SYMBOL(synchronize_net);
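
/*
 * Illustrative sketch (not part of dev.c): the canonical use of
 * synchronize_net() — unpublish an RCU-protected pointer that receive
 * paths may be dereferencing, wait out the readers, then free.
 * "struct my_hook" is hypothetical.
 */
struct my_hook { void (*fn)(struct sk_buff *skb); };    /* hypothetical */

static void my_hook_remove(struct my_hook __rcu **slot)
{
        struct my_hook *hook = rcu_dereference_protected(*slot, 1);

        RCU_INIT_POINTER(*slot, NULL);
        synchronize_net();      /* every in-flight reader has now finished */
        kfree(hook);
}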
Linus Torvalds1da177e2005-04-16 15:20:36 -07008094
8095/**
Eric Dumazet44a08732009-10-27 07:03:04 +00008096 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07008097 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00008098 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08008099 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008100 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08008101 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00008102 * If head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008103 *
8104 * Callers must hold the rtnl semaphore. You may want
8105 * unregister_netdev() instead of this.
8106 */
8107
Eric Dumazet44a08732009-10-27 07:03:04 +00008108void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008109{
Herbert Xua6620712007-12-12 19:21:56 -08008110 ASSERT_RTNL();
8111
Eric Dumazet44a08732009-10-27 07:03:04 +00008112 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00008113 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00008114 } else {
8115 rollback_registered(dev);
8116 /* Finish processing unregister after unlock */
8117 net_set_todo(dev);
8118 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008119}
Eric Dumazet44a08732009-10-27 07:03:04 +00008120EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008121
8122/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008123 * unregister_netdevice_many - unregister many devices
8124 * @head: list of devices
Eric Dumazet87757a92014-06-06 06:44:03 -07008125 *
8126 * Note: As most callers use a stack allocated list_head,
8127 * we force a list_del() to make sure stack wont be corrupted later.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008128 */
8129void unregister_netdevice_many(struct list_head *head)
8130{
8131 struct net_device *dev;
8132
8133 if (!list_empty(head)) {
8134 rollback_registered_many(head);
8135 list_for_each_entry(dev, head, unreg_list)
8136 net_set_todo(dev);
Eric Dumazet87757a92014-06-06 06:44:03 -07008137 list_del(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008138 }
8139}
Eric Dumazet63c80992009-10-27 07:06:49 +00008140EXPORT_SYMBOL(unregister_netdevice_many);
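
/*
 * Illustrative sketch (not part of dev.c): batching teardown so the
 * expensive synchronization in rollback_registered_many() is paid once
 * rather than per device.  "my_link_ops" is a hypothetical
 * rtnl_link_ops instance used to pick out this driver's devices.
 */
static struct rtnl_link_ops my_link_ops;        /* hypothetical */

static void my_destroy_all(struct net *net)
{
        struct net_device *dev, *aux;
        LIST_HEAD(kill_list);

        rtnl_lock();
        for_each_netdev_safe(net, dev, aux) {
                if (dev->rtnl_link_ops == &my_link_ops)
                        unregister_netdevice_queue(dev, &kill_list);
        }
        unregister_netdevice_many(&kill_list);  /* also list_del()s kill_list */
        rtnl_unlock();
}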
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008141
8142/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008143 * unregister_netdev - remove device from the kernel
8144 * @dev: device
8145 *
8146 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08008147 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008148 *
8149 * This is just a wrapper for unregister_netdevice that takes
8150 * the rtnl semaphore. In general you want to use this and not
8151 * unregister_netdevice.
8152 */
8153void unregister_netdev(struct net_device *dev)
8154{
8155 rtnl_lock();
8156 unregister_netdevice(dev);
8157 rtnl_unlock();
8158}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008159EXPORT_SYMBOL(unregister_netdev);
8160
Eric W. Biedermance286d32007-09-12 13:53:49 +02008161/**
8162 * dev_change_net_namespace - move device to a different network namespace
8163 * @dev: device
8164 * @net: network namespace
8165 * @pat: If not NULL name pattern to try if the current device name
8166 * is already taken in the destination network namespace.
8167 *
8168 * This function shuts down a device interface and moves it
8169 * to a new network namespace. On success 0 is returned, on
8170 * a failure a negative errno code is returned.
8171 *
8172 * Callers must hold the rtnl semaphore.
8173 */
8174
8175int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
8176{
Eric W. Biedermance286d32007-09-12 13:53:49 +02008177 int err;
8178
8179 ASSERT_RTNL();
8180
8181 /* Don't allow namespace local devices to be moved. */
8182 err = -EINVAL;
8183 if (dev->features & NETIF_F_NETNS_LOCAL)
8184 goto out;
8185
8186 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02008187 if (dev->reg_state != NETREG_REGISTERED)
8188 goto out;
8189
8190 /* Get out if there is nothing to do */
8191 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09008192 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02008193 goto out;
8194
8195 /* Pick the destination device name, and ensure
8196 * we can use it in the destination network namespace.
8197 */
8198 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00008199 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008200 /* We get here if we can't use the current device name */
8201 if (!pat)
8202 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00008203 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02008204 goto out;
8205 }
8206
8207 /*
8208 * And now a mini version of register_netdevice and unregister_netdevice.
8209 */
8210
8211 /* If device is running, close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07008212 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008213
8214 /* And unlink it from device chain */
8215 err = -ENODEV;
8216 unlist_netdevice(dev);
8217
8218 synchronize_net();
8219
8220 /* Shutdown queueing discipline. */
8221 dev_shutdown(dev);
8222
8223 /* Notify protocols that we are about to destroy
tchardingeb13da12017-02-09 17:56:06 +11008224 * this device. They should clean all the things.
8225 *
8226 * Note that dev->reg_state stays at NETREG_REGISTERED.
8227 * This is wanted because this way 8021q and macvlan know
8228 * the device is just moving and can keep their slaves up.
8229 */
Eric W. Biedermance286d32007-09-12 13:53:49 +02008230 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00008231 rcu_barrier();
8232 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07008233 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008234
8235 /*
8236 * Flush the unicast and multicast chains
8237 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00008238 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00008239 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008240
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008241 /* Send a netdev-removed uevent to the old namespace */
8242 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04008243 netdev_adjacent_del_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008244
Eric W. Biedermance286d32007-09-12 13:53:49 +02008245 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09008246 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008247
Eric W. Biedermance286d32007-09-12 13:53:49 +02008248 /* If there is an ifindex conflict assign a new one */
Nicolas Dichtel7a66bbc2015-04-02 17:07:09 +02008249 if (__dev_get_by_index(net, dev->ifindex))
Eric W. Biedermance286d32007-09-12 13:53:49 +02008250 dev->ifindex = dev_new_index(net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008251
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008252 /* Send a netdev-add uevent to the new namespace */
8253 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04008254 netdev_adjacent_add_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008255
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008256 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07008257 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008258 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008259
8260 /* Add the device back in the hashes */
8261 list_netdevice(dev);
8262
8263 /* Notify protocols that a new device appeared. */
8264 call_netdevice_notifiers(NETDEV_REGISTER, dev);
8265
Eric W. Biedermand90a9092009-12-12 22:11:15 +00008266 /*
8267 * Prevent userspace races by waiting until the network
8268 * device is fully setup before sending notifications.
8269 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07008270 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00008271
Eric W. Biedermance286d32007-09-12 13:53:49 +02008272 synchronize_net();
8273 err = 0;
8274out:
8275 return err;
8276}
Johannes Berg463d0182009-07-14 00:33:35 +02008277EXPORT_SYMBOL_GPL(dev_change_net_namespace);
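
/*
 * Illustrative sketch (not part of dev.c): moving a device into another
 * namespace under the RTNL, supplying a "dev%d" fallback pattern for
 * the name-conflict case — the same pattern default_device_exit()
 * below passes.  "my_move_to_ns" is hypothetical.
 */
static int my_move_to_ns(struct net_device *dev, struct net *target)
{
        int err;

        rtnl_lock();
        err = dev_change_net_namespace(dev, target, "dev%d");
        rtnl_unlock();
        return err;
}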
Eric W. Biedermance286d32007-09-12 13:53:49 +02008278
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008279static int dev_cpu_dead(unsigned int oldcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008280{
8281 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008282 struct sk_buff *skb;
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008283 unsigned int cpu;
Ashwanth Goli97d8b6e2017-06-13 16:54:55 +05308284 struct softnet_data *sd, *oldsd, *remsd = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008285
Linus Torvalds1da177e2005-04-16 15:20:36 -07008286 local_irq_disable();
8287 cpu = smp_processor_id();
8288 sd = &per_cpu(softnet_data, cpu);
8289 oldsd = &per_cpu(softnet_data, oldcpu);
8290
8291 /* Find end of our completion_queue. */
8292 list_skb = &sd->completion_queue;
8293 while (*list_skb)
8294 list_skb = &(*list_skb)->next;
8295 /* Append completion queue from offline CPU. */
8296 *list_skb = oldsd->completion_queue;
8297 oldsd->completion_queue = NULL;
8298
Linus Torvalds1da177e2005-04-16 15:20:36 -07008299 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00008300 if (oldsd->output_queue) {
8301 *sd->output_queue_tailp = oldsd->output_queue;
8302 sd->output_queue_tailp = oldsd->output_queue_tailp;
8303 oldsd->output_queue = NULL;
8304 oldsd->output_queue_tailp = &oldsd->output_queue;
8305 }
Eric Dumazetac64da02015-01-15 17:04:22 -08008306 /* Append NAPI poll list from offline CPU, with one exception:
8307 * process_backlog() must be called by cpu owning percpu backlog.
8308 * We properly handle process_queue & input_pkt_queue later.
8309 */
8310 while (!list_empty(&oldsd->poll_list)) {
8311 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
8312 struct napi_struct,
8313 poll_list);
8314
8315 list_del_init(&napi->poll_list);
8316 if (napi->poll == process_backlog)
8317 napi->state = 0;
8318 else
8319 ____napi_schedule(sd, napi);
Heiko Carstens264524d2011-06-06 20:50:03 +00008320 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008321
8322 raise_softirq_irqoff(NET_TX_SOFTIRQ);
8323 local_irq_enable();
8324
ashwanth@codeaurora.org773fc8f2017-06-09 14:24:58 +05308325#ifdef CONFIG_RPS
8326 remsd = oldsd->rps_ipi_list;
8327 oldsd->rps_ipi_list = NULL;
8328#endif
8329 /* send out pending IPIs on offline CPU */
8330 net_rps_send_ipi(remsd);
8331
Linus Torvalds1da177e2005-04-16 15:20:36 -07008332 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00008333 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08008334 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00008335 input_queue_head_incr(oldsd);
8336 }
Eric Dumazetac64da02015-01-15 17:04:22 -08008337 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08008338 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00008339 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07008340 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008341
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008342 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008343}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008344
Herbert Xu7f353bf2007-08-10 15:47:58 -07008345/**
Herbert Xub63365a2008-10-23 01:11:29 -07008346 * netdev_increment_features - increment feature set by one
8347 * @all: current feature set
8348 * @one: new feature set
8349 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07008350 *
8351 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07008352 * @one to the master device with current feature set @all. Will not
8353 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07008354 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00008355netdev_features_t netdev_increment_features(netdev_features_t all,
8356 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07008357{
Tom Herbertc8cd0982015-12-14 11:19:44 -08008358 if (mask & NETIF_F_HW_CSUM)
Tom Herberta1882222015-12-14 11:19:43 -08008359 mask |= NETIF_F_CSUM_MASK;
Michał Mirosław1742f182011-04-22 06:31:16 +00008360 mask |= NETIF_F_VLAN_CHALLENGED;
8361
Tom Herberta1882222015-12-14 11:19:43 -08008362 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
Michał Mirosław1742f182011-04-22 06:31:16 +00008363 all &= one | ~NETIF_F_ALL_FOR_ALL;
8364
Michał Mirosław1742f182011-04-22 06:31:16 +00008365 /* If one device supports hw checksumming, set for all. */
Tom Herbertc8cd0982015-12-14 11:19:44 -08008366 if (all & NETIF_F_HW_CSUM)
8367 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07008368
8369 return all;
8370}
Herbert Xub63365a2008-10-23 01:11:29 -07008371EXPORT_SYMBOL(netdev_increment_features);
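
/*
 * Illustrative sketch (not part of dev.c): how a master device (bond,
 * team, bridge) folds each slave's features into its own, in the style
 * of bond_compute_features().  "struct my_slave" is hypothetical
 * bookkeeping; the accumulator starts from the full mask.
 */
struct my_slave { struct list_head list; struct net_device *dev; };

static netdev_features_t my_master_features(struct list_head *slaves,
                                            netdev_features_t mask)
{
        netdev_features_t features = mask;
        struct my_slave *s;

        list_for_each_entry(s, slaves, list)
                features = netdev_increment_features(features,
                                                     s->dev->features, mask);
        return features;
}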
Herbert Xu7f353bf2007-08-10 15:47:58 -07008372
Baruch Siach430f03c2013-06-02 20:43:55 +00008373static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008374{
8375 int i;
8376 struct hlist_head *hash;
8377
8378 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
8379 if (hash != NULL)
8380 for (i = 0; i < NETDEV_HASHENTRIES; i++)
8381 INIT_HLIST_HEAD(&hash[i]);
8382
8383 return hash;
8384}
8385
Eric W. Biederman881d9662007-09-17 11:56:21 -07008386/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07008387static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07008388{
Rustad, Mark D734b6542012-07-18 09:06:07 +00008389 if (net != &init_net)
8390 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07008391
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008392 net->dev_name_head = netdev_create_hash();
8393 if (net->dev_name_head == NULL)
8394 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008395
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008396 net->dev_index_head = netdev_create_hash();
8397 if (net->dev_index_head == NULL)
8398 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008399
8400 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008401
8402err_idx:
8403 kfree(net->dev_name_head);
8404err_name:
8405 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008406}
8407
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008408/**
8409 * netdev_drivername - network driver for the device
8410 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008411 *
8412 * Determine network driver for device.
8413 */
David S. Miller3019de12011-06-06 16:41:33 -07008414const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07008415{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07008416 const struct device_driver *driver;
8417 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07008418 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07008419
8420 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008421 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07008422 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008423
8424 driver = parent->driver;
8425 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07008426 return driver->name;
8427 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008428}
8429
Joe Perches6ea754e2014-09-22 11:10:50 -07008430static void __netdev_printk(const char *level, const struct net_device *dev,
8431 struct va_format *vaf)
Joe Perches256df2f2010-06-27 01:02:35 +00008432{
Joe Perchesb004ff42012-09-12 20:12:19 -07008433 if (dev && dev->dev.parent) {
Joe Perches6ea754e2014-09-22 11:10:50 -07008434 dev_printk_emit(level[1] - '0',
8435 dev->dev.parent,
8436 "%s %s %s%s: %pV",
8437 dev_driver_string(dev->dev.parent),
8438 dev_name(dev->dev.parent),
8439 netdev_name(dev), netdev_reg_state(dev),
8440 vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008441 } else if (dev) {
Joe Perches6ea754e2014-09-22 11:10:50 -07008442 printk("%s%s%s: %pV",
8443 level, netdev_name(dev), netdev_reg_state(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008444 } else {
Joe Perches6ea754e2014-09-22 11:10:50 -07008445 printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008446 }
Joe Perches256df2f2010-06-27 01:02:35 +00008447}
8448
Joe Perches6ea754e2014-09-22 11:10:50 -07008449void netdev_printk(const char *level, const struct net_device *dev,
8450 const char *format, ...)
Joe Perches256df2f2010-06-27 01:02:35 +00008451{
8452 struct va_format vaf;
8453 va_list args;
Joe Perches256df2f2010-06-27 01:02:35 +00008454
8455 va_start(args, format);
8456
8457 vaf.fmt = format;
8458 vaf.va = &args;
8459
Joe Perches6ea754e2014-09-22 11:10:50 -07008460 __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008461
Joe Perches256df2f2010-06-27 01:02:35 +00008462 va_end(args);
Joe Perches256df2f2010-06-27 01:02:35 +00008463}
8464EXPORT_SYMBOL(netdev_printk);
8465
8466#define define_netdev_printk_level(func, level) \
Joe Perches6ea754e2014-09-22 11:10:50 -07008467void func(const struct net_device *dev, const char *fmt, ...) \
Joe Perches256df2f2010-06-27 01:02:35 +00008468{ \
Joe Perches256df2f2010-06-27 01:02:35 +00008469 struct va_format vaf; \
8470 va_list args; \
8471 \
8472 va_start(args, fmt); \
8473 \
8474 vaf.fmt = fmt; \
8475 vaf.va = &args; \
8476 \
Joe Perches6ea754e2014-09-22 11:10:50 -07008477 __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07008478 \
Joe Perches256df2f2010-06-27 01:02:35 +00008479 va_end(args); \
Joe Perches256df2f2010-06-27 01:02:35 +00008480} \
8481EXPORT_SYMBOL(func);
8482
8483define_netdev_printk_level(netdev_emerg, KERN_EMERG);
8484define_netdev_printk_level(netdev_alert, KERN_ALERT);
8485define_netdev_printk_level(netdev_crit, KERN_CRIT);
8486define_netdev_printk_level(netdev_err, KERN_ERR);
8487define_netdev_printk_level(netdev_warn, KERN_WARNING);
8488define_netdev_printk_level(netdev_notice, KERN_NOTICE);
8489define_netdev_printk_level(netdev_info, KERN_INFO);
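
/*
 * Illustrative sketch (not part of dev.c): the level helpers generated
 * above are used like dev_err()/dev_info(), but prefix messages with
 * the driver, bus and interface names.  "my_report_link" is
 * hypothetical.
 */
static void my_report_link(struct net_device *dev, bool up)
{
        if (up)
                netdev_info(dev, "link up\n");
        else
                netdev_warn(dev, "link down\n");
}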
8490
Pavel Emelyanov46650792007-10-08 20:38:39 -07008491static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07008492{
8493 kfree(net->dev_name_head);
8494 kfree(net->dev_index_head);
8495}
8496
Denis V. Lunev022cbae2007-11-13 03:23:50 -08008497static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07008498 .init = netdev_init,
8499 .exit = netdev_exit,
8500};
8501
Pavel Emelyanov46650792007-10-08 20:38:39 -07008502static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02008503{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008504 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02008505 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008506 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02008507 * initial network namespace
8508 */
8509 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008510 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008511 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008512 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02008513
8514 /* Ignore unmovable devices (e.g. loopback) */
8515 if (dev->features & NETIF_F_NETNS_LOCAL)
8516 continue;
8517
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008518 /* Leave virtual devices for the generic cleanup */
8519 if (dev->rtnl_link_ops)
8520 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08008521
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008522 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008523 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
8524 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008525 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008526 pr_emerg("%s: failed to move %s to init_net: %d\n",
8527 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008528 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02008529 }
8530 }
8531 rtnl_unlock();
8532}
8533
Eric W. Biederman50624c92013-09-23 21:19:49 -07008534static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
8535{
8536 /* Return with the rtnl_lock held when there are no network
8537 * devices unregistering in any network namespace in net_list.
8538 */
8539 struct net *net;
8540 bool unregistering;
Peter Zijlstraff960a72014-10-29 17:04:56 +01008541 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008542
Peter Zijlstraff960a72014-10-29 17:04:56 +01008543 add_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008544 for (;;) {
Eric W. Biederman50624c92013-09-23 21:19:49 -07008545 unregistering = false;
8546 rtnl_lock();
8547 list_for_each_entry(net, net_list, exit_list) {
8548 if (net->dev_unreg_count > 0) {
8549 unregistering = true;
8550 break;
8551 }
8552 }
8553 if (!unregistering)
8554 break;
8555 __rtnl_unlock();
Peter Zijlstraff960a72014-10-29 17:04:56 +01008556
8557 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008558 }
Peter Zijlstraff960a72014-10-29 17:04:56 +01008559 remove_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008560}
8561
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008562static void __net_exit default_device_exit_batch(struct list_head *net_list)
8563{
8564 /* At exit all network devices most be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04008565 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008566 * Do this across as many network namespaces as possible to
8567 * improve batching efficiency.
8568 */
8569 struct net_device *dev;
8570 struct net *net;
8571 LIST_HEAD(dev_kill_list);
8572
Eric W. Biederman50624c92013-09-23 21:19:49 -07008573 /* To prevent network device cleanup code from dereferencing
8574 * loopback devices or network devices that have been freed,
8575 * wait here for all pending unregistrations to complete
8576 * before unregistering the loopback device and allowing the
8577 * network namespace to be freed.
8578 *
8579 * The netdev todo list containing all network devices
8580 * unregistrations that happen in default_device_exit_batch
8581 * will run in the rtnl_unlock() at the end of
8582 * default_device_exit_batch.
8583 */
8584 rtnl_lock_unregistering(net_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008585 list_for_each_entry(net, net_list, exit_list) {
8586 for_each_netdev_reverse(net, dev) {
Jiri Pirkob0ab2fa2014-06-26 09:58:25 +02008587 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008588 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
8589 else
8590 unregister_netdevice_queue(dev, &dev_kill_list);
8591 }
8592 }
8593 unregister_netdevice_many(&dev_kill_list);
8594 rtnl_unlock();
8595}
8596
Denis V. Lunev022cbae2007-11-13 03:23:50 -08008597static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008598 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008599 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02008600};
8601
Linus Torvalds1da177e2005-04-16 15:20:36 -07008602/*
8603 * Initialize the DEV module. At boot time this walks the device list and
8604 * unhooks any devices that fail to initialise (normally hardware not
8605 * present) and leaves us with a valid list of present and active devices.
8606 *
8607 */
8608
8609/*
8610 * This is called single threaded during boot, so no need
8611 * to take the rtnl semaphore.
8612 */
8613static int __init net_dev_init(void)
8614{
8615 int i, rc = -ENOMEM;
8616
8617 BUG_ON(!dev_boot_phase);
8618
Linus Torvalds1da177e2005-04-16 15:20:36 -07008619 if (dev_proc_init())
8620 goto out;
8621
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008622 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07008623 goto out;
8624
8625 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08008626 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008627 INIT_LIST_HEAD(&ptype_base[i]);
8628
Vlad Yasevich62532da2012-11-15 08:49:10 +00008629 INIT_LIST_HEAD(&offload_base);
8630
Eric W. Biederman881d9662007-09-17 11:56:21 -07008631 if (register_pernet_subsys(&netdev_net_ops))
8632 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008633
8634 /*
8635 * Initialise the packet receive queues.
8636 */
8637
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07008638 for_each_possible_cpu(i) {
Eric Dumazet41852492016-08-26 12:50:39 -07008639 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008640 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008641
Eric Dumazet41852492016-08-26 12:50:39 -07008642 INIT_WORK(flush, flush_backlog);
8643
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008644 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07008645 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008646 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00008647 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00008648#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008649 sd->csd.func = rps_trigger_softirq;
8650 sd->csd.info = sd;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008651 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07008652#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00008653
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008654 sd->backlog.poll = process_backlog;
8655 sd->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008656 }
8657
Linus Torvalds1da177e2005-04-16 15:20:36 -07008658 dev_boot_phase = 0;
8659
Eric W. Biederman505d4f72008-11-07 22:54:20 -08008660 /* The loopback device is special: if any other network device
8661 * is present in a network namespace, the loopback device must
8662 * be present. Since we now dynamically allocate and free the
8663 * loopback device, ensure this invariant is maintained by
8664 * keeping the loopback device as the first device on the
8665 * list of network devices, so that the loopback device
8666 * is the first device that appears and the last network device
8667 * that disappears.
8668 */
8669 if (register_pernet_device(&loopback_net_ops))
8670 goto out;
8671
8672 if (register_pernet_device(&default_device_ops))
8673 goto out;
8674
Carlos R. Mafra962cf362008-05-15 11:15:37 -03008675 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
8676 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008677
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008678 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
8679 NULL, dev_cpu_dead);
8680 WARN_ON(rc < 0);
Thomas Graff38a9eb2015-07-21 10:43:56 +02008681 dst_subsys_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008682 rc = 0;
8683out:
8684 return rc;
8685}
8686
8687subsys_initcall(net_dev_init);