/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
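
/*
 * A minimal reader sketch (not part of the original file): a pure
 * reader can traverse the device list under RCU alone, e.g. a
 * hypothetical helper counting the devices in a namespace:
 *
 *	static int example_count_netdevs(struct net *net)
 *	{
 *		struct net_device *dev;
 *		int count = 0;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			count++;
 *		rcu_read_unlock();
 *		return count;
 *	}
 *
 * Writers instead take rtnl_lock() plus write_lock_bh(&dev_base_lock),
 * as list_netdevice() below does.
 */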

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles the packet
 *	is first on the list, it cannot sense that the packet is cloned
 *	and should be copied-on-write; it would change the packet and
 *	subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
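
/*
 * Usage sketch (assumed, not from the original file): a tap in the
 * style of af_packet registers a &packet_type; the handler name
 * example_rcv and the choice of ETH_P_ALL here are hypothetical.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		// Called for every received packet; must consume the skb.
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type example_pt __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),	// tap: all protocols
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_pt);	// attach (atomic, RCU publish)
 *	...
 *	dev_remove_pack(&example_pt);	// detach; sleeps (see below)
 */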

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
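
/*
 * Usage sketch (assumed): offload handlers are kept sorted by
 * ->priority, lowest value first, so a hypothetical IPv4 GRO/GSO
 * offload (the callback names below are invented) would be declared
 * and registered as:
 *
 *	static struct packet_offload example_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.priority = 10,		// matched after lower-valued entries
 *		.callbacks = {
 *			.gso_segment = example_gso_segment,
 *			.gro_receive = example_gro_receive,
 *			.gro_complete = example_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&example_offload);
 */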

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
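
/*
 * Worked example (a sketch, assuming the usual "netdev=" format): a
 * kernel command line of
 *
 *	netdev=9,0x300,eth0
 *
 * is parsed above as ints[0] = 2, ints[1] = 9 and ints[2] = 0x300, so
 * map.irq = 9 and map.base_addr = 0x300 are recorded for the device
 * that later registers as "eth0"; netdev_boot_setup_check() then
 * copies those values into the probing net_device.
 */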

/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the caller to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
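
/*
 * Caller sketch (assumed): an OVS-like user that wants the egress
 * tunnel key for an skb headed out of a metadata tunnel device:
 *
 *	if (!dev_fill_metadata_dst(dev, skb)) {
 *		struct ip_tunnel_info *info = skb_tunnel_info(skb);
 *
 *		// info->key now describes the egress tunnel headers
 *	}
 */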

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
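
/*
 * Sketch of the two lookup flavours (the @net pointer and the "eth0"
 * name are hypothetical):
 *
 *	// Long-lived, counted reference; may be used in sleeping
 *	// context and must be released with dev_put().
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		...
 *		dev_put(dev);
 *	}
 *
 *	// Short-lived access: no refcount taken, the pointer is only
 *	// valid inside the RCU read-side critical section.
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		netdev_info(dev, "found\n");
 *	rcu_read_unlock();
 */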

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

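/*
 * Usage sketch (assumed): resolving an ifindex to a name without
 * taking RTNL, much as the SIOCGIFNAME ioctl path does:
 *
 *	char name[IFNAMSIZ];
 *
 *	if (netdev_get_name(net, name, ifindex) == 0)
 *		pr_info("ifindex %d is %s\n", ifindex, name);
 */
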
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
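
/*
 * Lookup sketch (the @net pointer and address bytes are assumed):
 * finding a device by its Ethernet address under RCU:
 *
 *	static const char addr[ETH_ALEN] = {
 *		0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, addr);
 *	if (dev)
 *		netdev_info(dev, "matched hw address\n");
 *	rcu_read_unlock();
 */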

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
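
/*
 * A few worked cases for dev_valid_name() (with IFNAMSIZ == 16):
 *
 *	dev_valid_name("eth0")		-> true
 *	dev_valid_name("br-lan")	-> true
 *	dev_valid_name("")		-> false (empty)
 *	dev_valid_name("..")		-> false (reserved)
 *	dev_valid_name("my/eth")	-> false ('/' rejected)
 *	dev_valid_name("a b")		-> false (whitespace)
 *	dev_valid_name("abcdefghijklmnop") -> false (16 chars >= IFNAMSIZ)
 */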

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
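
/*
 * Usage sketch (assumed; "dummy%d" is illustrative): a driver that
 * wants conventional numbering calls this before registration:
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto fail;
 *	// dev->name is now e.g. "dummy0", the first free slot
 *
 * Passing a "%d"-style template in dev->name to register_netdevice()
 * has the same effect, since dev_get_valid_name() below performs the
 * same allocation.
 */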
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001135
Gao feng828de4f2012-09-13 20:58:27 +00001136static int dev_alloc_name_ns(struct net *net,
1137 struct net_device *dev,
1138 const char *name)
Octavian Purdilad9031022009-11-18 02:36:59 +00001139{
Gao feng828de4f2012-09-13 20:58:27 +00001140 char buf[IFNAMSIZ];
1141 int ret;
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001142
Gao feng828de4f2012-09-13 20:58:27 +00001143 ret = __dev_alloc_name(net, name, buf);
1144 if (ret >= 0)
1145 strlcpy(dev->name, buf, IFNAMSIZ);
1146 return ret;
1147}
1148
1149static int dev_get_valid_name(struct net *net,
1150 struct net_device *dev,
1151 const char *name)
1152{
1153 BUG_ON(!net);
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001154
Octavian Purdilad9031022009-11-18 02:36:59 +00001155 if (!dev_valid_name(name))
1156 return -EINVAL;
1157
Jiri Pirko1c5cae82011-04-30 01:21:32 +00001158 if (strchr(name, '%'))
Gao feng828de4f2012-09-13 20:58:27 +00001159 return dev_alloc_name_ns(net, dev, name);
Octavian Purdilad9031022009-11-18 02:36:59 +00001160 else if (__dev_get_by_name(net, name))
1161 return -EEXIST;
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001162 else if (dev->name != name)
1163 strlcpy(dev->name, name, IFNAMSIZ);
Octavian Purdilad9031022009-11-18 02:36:59 +00001164
1165 return 0;
1166}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167
1168/**
1169 * dev_change_name - change name of a device
1170 * @dev: device
1171 * @newname: name (or format string) must be at least IFNAMSIZ
1172 *
1173 * Change name of a device, can pass format strings "eth%d".
1174 * for wildcarding.
1175 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07001176int dev_change_name(struct net_device *dev, const char *newname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177{
Tom Gundersen238fa362014-07-14 16:37:23 +02001178 unsigned char old_assign_type;
Herbert Xufcc5a032007-07-30 17:03:38 -07001179 char oldname[IFNAMSIZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180 int err = 0;
Herbert Xufcc5a032007-07-30 17:03:38 -07001181 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001182 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183
1184 ASSERT_RTNL();
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001185 BUG_ON(!dev_net(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001187 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 if (dev->flags & IFF_UP)
1189 return -EBUSY;
1190
Eric Dumazet30e6c9fa2012-12-20 17:25:08 +00001191 write_seqcount_begin(&devnet_rename_seq);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001192
1193 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
Eric Dumazet30e6c9fa2012-12-20 17:25:08 +00001194 write_seqcount_end(&devnet_rename_seq);
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -07001195 return 0;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001196 }
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -07001197
Herbert Xufcc5a032007-07-30 17:03:38 -07001198 memcpy(oldname, dev->name, IFNAMSIZ);
1199
Gao feng828de4f2012-09-13 20:58:27 +00001200 err = dev_get_valid_name(net, dev, newname);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001201 if (err < 0) {
Eric Dumazet30e6c9fa2012-12-20 17:25:08 +00001202 write_seqcount_end(&devnet_rename_seq);
Octavian Purdilad9031022009-11-18 02:36:59 +00001203 return err;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001204 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205
Veaceslav Falico6fe82a32014-07-17 20:33:32 +02001206 if (oldname[0] && !strchr(oldname, '%'))
1207 netdev_info(dev, "renamed from %s\n", oldname);
1208
Tom Gundersen238fa362014-07-14 16:37:23 +02001209 old_assign_type = dev->name_assign_type;
1210 dev->name_assign_type = NET_NAME_RENAMED;
1211
Herbert Xufcc5a032007-07-30 17:03:38 -07001212rollback:
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001213 ret = device_rename(&dev->dev, dev->name);
1214 if (ret) {
1215 memcpy(dev->name, oldname, IFNAMSIZ);
Tom Gundersen238fa362014-07-14 16:37:23 +02001216 dev->name_assign_type = old_assign_type;
Eric Dumazet30e6c9fa2012-12-20 17:25:08 +00001217 write_seqcount_end(&devnet_rename_seq);
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001218 return ret;
Stephen Hemmingerdcc99772008-05-14 22:33:38 -07001219 }
Herbert Xu7f988ea2007-07-30 16:35:46 -07001220
Eric Dumazet30e6c9fa2012-12-20 17:25:08 +00001221 write_seqcount_end(&devnet_rename_seq);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001222
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01001223 netdev_adjacent_rename_links(dev, oldname);
1224
Herbert Xu7f988ea2007-07-30 16:35:46 -07001225 write_lock_bh(&dev_base_lock);
Eric Dumazet372b2312011-05-17 13:56:59 -04001226 hlist_del_rcu(&dev->name_hlist);
Eric Dumazet72c95282009-10-30 07:11:27 +00001227 write_unlock_bh(&dev_base_lock);
1228
1229 synchronize_rcu();
1230
1231 write_lock_bh(&dev_base_lock);
1232 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
Herbert Xu7f988ea2007-07-30 16:35:46 -07001233 write_unlock_bh(&dev_base_lock);
1234
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001235 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001236 ret = notifier_to_errno(ret);
1237
1238 if (ret) {
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001239 /* err >= 0 after dev_alloc_name() or stores the first errno */
1240 if (err >= 0) {
Herbert Xufcc5a032007-07-30 17:03:38 -07001241 err = ret;
Eric Dumazet30e6c9fa2012-12-20 17:25:08 +00001242 write_seqcount_begin(&devnet_rename_seq);
Herbert Xufcc5a032007-07-30 17:03:38 -07001243 memcpy(dev->name, oldname, IFNAMSIZ);
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01001244 memcpy(oldname, newname, IFNAMSIZ);
Tom Gundersen238fa362014-07-14 16:37:23 +02001245 dev->name_assign_type = old_assign_type;
1246 old_assign_type = NET_NAME_RENAMED;
Herbert Xufcc5a032007-07-30 17:03:38 -07001247 goto rollback;
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001248 } else {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001249 pr_err("%s: name change rollback failed: %d\n",
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001250 dev->name, ret);
Herbert Xufcc5a032007-07-30 17:03:38 -07001251 }
1252 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253
1254 return err;
1255}
1256
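/* Illustrative sketch (not part of dev.c): driving dev_change_name() with
 * a wildcard pattern. "mydev" is a hypothetical net_device pointer;
 * dev_change_name() itself requires RTNL to be held.
 */
static int example_rename_with_pattern(struct net_device *mydev)
{
        int err;

        rtnl_lock();
        /* "%d" makes the core pick the lowest free unit: lan0, lan1, ... */
        err = dev_change_name(mydev, "lan%d");
        rtnl_unlock();

        return err;
}
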
1257/**
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001258 * dev_set_alias - change ifalias of a device
1259 * @dev: device
1260 * @alias: name, up to IFALIASZ bytes
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07001261 * @len: number of bytes to copy from @alias
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001262 *
1263 * Set the ifalias for a device. Returns the new length on success.
1264 */
1265int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1266{
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001267 char *new_ifalias;
1268
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001269 ASSERT_RTNL();
1270
1271 if (len >= IFALIASZ)
1272 return -EINVAL;
1273
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001274 if (!len) {
Sachin Kamat388dfc22012-11-20 00:57:04 +00001275 kfree(dev->ifalias);
1276 dev->ifalias = NULL;
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001277 return 0;
1278 }
1279
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001280 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1281 if (!new_ifalias)
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001282 return -ENOMEM;
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001283 dev->ifalias = new_ifalias;
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001284
1285 strlcpy(dev->ifalias, alias, len+1);
1286 return len;
1287}
1288
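/* Illustrative sketch: setting an alias from a context that already holds
 * RTNL, e.g. a netlink handler. "mydev" is hypothetical; on success the
 * stored length is returned, on failure a negative errno code.
 */
static int example_set_alias(struct net_device *mydev)
{
        static const char alias[] = "uplink to core switch";

        ASSERT_RTNL();
        return dev_set_alias(mydev, alias, strlen(alias));
}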
1289
1290/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001291 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001292 * @dev: device to cause notification
1293 *
1294 * Called to indicate a device has changed features.
1295 */
1296void netdev_features_change(struct net_device *dev)
1297{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001298 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001299}
1300EXPORT_SYMBOL(netdev_features_change);
1301
1302/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 * netdev_state_change - device changes state
1304 * @dev: device to cause notification
1305 *
1306 * Called to indicate a device has changed state. This function calls
1307 * the notifier chains for netdev_chain and sends a NEWLINK message
1308 * to the routing socket.
1309 */
1310void netdev_state_change(struct net_device *dev)
1311{
1312 if (dev->flags & IFF_UP) {
Loic Prylli54951192014-07-01 21:39:43 -07001313 struct netdev_notifier_change_info change_info;
1314
1315 change_info.flags_changed = 0;
1316 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1317 &change_info.info);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001318 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 }
1320}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001321EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322
Amerigo Wangee89bab2012-08-09 22:14:56 +00001323/**
tcharding722c9a02017-02-09 17:56:04 +11001324 * netdev_notify_peers - notify network peers about existence of @dev
1325 * @dev: network device
Amerigo Wangee89bab2012-08-09 22:14:56 +00001326 *
1327 * Generate traffic such that interested network peers are aware of
1328 * @dev, such as by generating a gratuitous ARP. This may be used when
1329 * a device wants to inform the rest of the network about some sort of
1330 * reconfiguration such as a failover event or virtual machine
1331 * migration.
1332 */
1333void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001334{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001335 rtnl_lock();
1336 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
Vlad Yasevich37c343b2017-03-14 08:58:08 -04001337 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
Amerigo Wangee89bab2012-08-09 22:14:56 +00001338 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001339}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001340EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001341
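/* Illustrative sketch: a virtual NIC announcing itself after a live
 * migration. netdev_notify_peers() takes rtnl_lock() itself, so the
 * (hypothetical) caller must run in process context without RTNL held.
 */
static void example_after_migration(struct net_device *mydev)
{
        netdev_notify_peers(mydev);
}
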
Patrick McHardybd380812010-02-26 06:34:53 +00001342static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001344 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001345 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001347 ASSERT_RTNL();
1348
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 if (!netif_device_present(dev))
1350 return -ENODEV;
1351
Neil Hormanca99ca12013-02-05 08:05:43 +00001352 /* Block netpoll from trying to do any rx path servicing.
1353 * If we don't do this, there is a chance ndo_poll_controller
1354 * or ndo_poll may be running while we open the device.
1355 */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001356 netpoll_poll_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001357
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001358 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1359 ret = notifier_to_errno(ret);
1360 if (ret)
1361 return ret;
1362
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001364
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001365 if (ops->ndo_validate_addr)
1366 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001367
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001368 if (!ret && ops->ndo_open)
1369 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370
Eric W. Biederman66b55522014-03-27 15:39:03 -07001371 netpoll_poll_enable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001372
Jeff Garzikbada3392007-10-23 20:19:37 -07001373 if (ret)
1374 clear_bit(__LINK_STATE_START, &dev->state);
1375 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 dev->flags |= IFF_UP;
Patrick McHardy4417da62007-06-27 01:28:10 -07001377 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001379 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001381
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 return ret;
1383}
Patrick McHardybd380812010-02-26 06:34:53 +00001384
1385/**
1386 * dev_open - prepare an interface for use.
1387 * @dev: device to open
1388 *
1389 * Takes a device from down to up state. The device's private open
1390 * function is invoked and then the multicast lists are loaded. Finally
1391 * the device is moved into the up state and a %NETDEV_UP message is
1392 * sent to the netdev notifier chain.
1393 *
1394 * Calling this function on an active interface is a nop. On a failure
1395 * a negative errno code is returned.
1396 */
1397int dev_open(struct net_device *dev)
1398{
1399 int ret;
1400
Patrick McHardybd380812010-02-26 06:34:53 +00001401 if (dev->flags & IFF_UP)
1402 return 0;
1403
Patrick McHardybd380812010-02-26 06:34:53 +00001404 ret = __dev_open(dev);
1405 if (ret < 0)
1406 return ret;
1407
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001408 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Patrick McHardybd380812010-02-26 06:34:53 +00001409 call_netdevice_notifiers(NETDEV_UP, dev);
1410
1411 return ret;
1412}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001413EXPORT_SYMBOL(dev_open);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414
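/* Illustrative sketch: bringing an interface up from kernel code, roughly
 * what "ip link set <dev> up" reaches via dev_change_flags(). dev_open()
 * must be called under RTNL; "mydev" is hypothetical.
 */
static int example_bring_up(struct net_device *mydev)
{
        int err;

        rtnl_lock();
        err = dev_open(mydev);
        rtnl_unlock();

        return err;
}
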
Octavian Purdila44345722010-12-13 12:44:07 +00001415static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416{
Octavian Purdila44345722010-12-13 12:44:07 +00001417 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001418
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001419 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001420 might_sleep();
1421
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001422 list_for_each_entry(dev, head, close_list) {
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001423 /* Temporarily disable netpoll until the interface is down */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001424 netpoll_poll_disable(dev);
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001425
Octavian Purdila44345722010-12-13 12:44:07 +00001426 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427
Octavian Purdila44345722010-12-13 12:44:07 +00001428 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
Octavian Purdila44345722010-12-13 12:44:07 +00001430 /* Synchronize to scheduled poll. We cannot touch poll list, it
1431 * can even be on a different cpu. So just clear netif_running().
1432 *
1433 * dev->stop() will invoke napi_disable() on all of its
1434 * napi_struct instances on this device.
1435 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001436 smp_mb__after_atomic(); /* Commit netif_running(). */
Octavian Purdila44345722010-12-13 12:44:07 +00001437 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438
Octavian Purdila44345722010-12-13 12:44:07 +00001439 dev_deactivate_many(head);
1440
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001441 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001442 const struct net_device_ops *ops = dev->netdev_ops;
1443
1444 /*
1445 * Call the device-specific close. This cannot fail and is
1446 * only done if the device is UP.
1447 *
1448 * We allow it to be called even after a DETACH hot-plug
1449 * event.
1450 */
1451 if (ops->ndo_stop)
1452 ops->ndo_stop(dev);
1453
Octavian Purdila44345722010-12-13 12:44:07 +00001454 dev->flags &= ~IFF_UP;
Eric W. Biederman66b55522014-03-27 15:39:03 -07001455 netpoll_poll_enable(dev);
Octavian Purdila44345722010-12-13 12:44:07 +00001456 }
1457
1458 return 0;
1459}
1460
1461static int __dev_close(struct net_device *dev)
1462{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001463 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001464 LIST_HEAD(single);
1465
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001466 list_add(&dev->close_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001467 retval = __dev_close_many(&single);
1468 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001469
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001470 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001471}
1472
David S. Miller99c4a262015-03-18 22:52:33 -04001473int dev_close_many(struct list_head *head, bool unlink)
Octavian Purdila44345722010-12-13 12:44:07 +00001474{
1475 struct net_device *dev, *tmp;
Octavian Purdila44345722010-12-13 12:44:07 +00001476
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001477 /* Remove the devices that don't need to be closed */
1478 list_for_each_entry_safe(dev, tmp, head, close_list)
Octavian Purdila44345722010-12-13 12:44:07 +00001479 if (!(dev->flags & IFF_UP))
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001480 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001481
1482 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001483
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001484 list_for_each_entry_safe(dev, tmp, head, close_list) {
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001485 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Octavian Purdila44345722010-12-13 12:44:07 +00001486 call_netdevice_notifiers(NETDEV_DOWN, dev);
David S. Miller99c4a262015-03-18 22:52:33 -04001487 if (unlink)
1488 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001489 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 return 0;
1492}
David S. Miller99c4a262015-03-18 22:52:33 -04001493EXPORT_SYMBOL(dev_close_many);
Patrick McHardybd380812010-02-26 06:34:53 +00001494
1495/**
1496 * dev_close - shutdown an interface.
1497 * @dev: device to shutdown
1498 *
1499 * This function moves an active device into down state. A
1500 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1501 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1502 * chain.
1503 */
1504int dev_close(struct net_device *dev)
1505{
Eric Dumazete14a5992011-05-10 12:26:06 -07001506 if (dev->flags & IFF_UP) {
1507 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001508
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001509 list_add(&dev->close_list, &single);
David S. Miller99c4a262015-03-18 22:52:33 -04001510 dev_close_many(&single, true);
Eric Dumazete14a5992011-05-10 12:26:06 -07001511 list_del(&single);
1512 }
dingtianhongda6e3782013-05-27 19:53:31 +00001513 return 0;
Patrick McHardybd380812010-02-26 06:34:53 +00001514}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001515EXPORT_SYMBOL(dev_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516
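/* Illustrative sketch: batch-closing two (hypothetical) devices so they
 * share one round of notifications, mirroring what dev_close() does above
 * for a single device. RTNL is assumed to be held.
 */
static void example_close_pair(struct net_device *a, struct net_device *b)
{
        LIST_HEAD(head);

        ASSERT_RTNL();
        list_add(&a->close_list, &head);
        list_add(&b->close_list, &head);
        dev_close_many(&head, true);    /* unlink == true re-empties the list */
}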
1517
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001518/**
1519 * dev_disable_lro - disable Large Receive Offload on a device
1520 * @dev: device
1521 *
1522 * Disable Large Receive Offload (LRO) on a net device. Must be
1523 * called under RTNL. This is needed if received packets may be
1524 * forwarded to another interface.
1525 */
1526void dev_disable_lro(struct net_device *dev)
1527{
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001528 struct net_device *lower_dev;
1529 struct list_head *iter;
Michal Kubeček529d0482013-11-15 06:18:50 +01001530
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001531 dev->wanted_features &= ~NETIF_F_LRO;
1532 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001533
Michał Mirosław22d59692011-04-21 12:42:15 +00001534 if (unlikely(dev->features & NETIF_F_LRO))
1535 netdev_WARN(dev, "failed to disable LRO!\n");
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001536
1537 netdev_for_each_lower_dev(dev, lower_dev, iter)
1538 dev_disable_lro(lower_dev);
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001539}
1540EXPORT_SYMBOL(dev_disable_lro);
1541
Jiri Pirko351638e2013-05-28 01:30:21 +00001542static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1543 struct net_device *dev)
1544{
1545 struct netdev_notifier_info info;
1546
1547 netdev_notifier_info_init(&info, dev);
1548 return nb->notifier_call(nb, val, &info);
1549}
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001550
Eric W. Biederman881d9662007-09-17 11:56:21 -07001551static int dev_boot_phase = 1;
1552
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553/**
tcharding722c9a02017-02-09 17:56:04 +11001554 * register_netdevice_notifier - register a network notifier block
1555 * @nb: notifier
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 *
tcharding722c9a02017-02-09 17:56:04 +11001557 * Register a notifier to be called when network device events occur.
1558 * The notifier passed is linked into the kernel structures and must
1559 * not be reused until it has been unregistered. A negative errno code
1560 * is returned on a failure.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 *
tcharding722c9a02017-02-09 17:56:04 +11001562 * When registered, all registration and up events are replayed
1563 * to the new notifier so that it gets a race-free
1564 * view of the network device list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 */
1566
1567int register_netdevice_notifier(struct notifier_block *nb)
1568{
1569 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001570 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001571 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 int err;
1573
1574 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001575 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001576 if (err)
1577 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001578 if (dev_boot_phase)
1579 goto unlock;
1580 for_each_net(net) {
1581 for_each_netdev(net, dev) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001582 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001583 err = notifier_to_errno(err);
1584 if (err)
1585 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586
Eric W. Biederman881d9662007-09-17 11:56:21 -07001587 if (!(dev->flags & IFF_UP))
1588 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001589
Jiri Pirko351638e2013-05-28 01:30:21 +00001590 call_netdevice_notifier(nb, NETDEV_UP, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001591 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001593
1594unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 rtnl_unlock();
1596 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001597
1598rollback:
1599 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001600 for_each_net(net) {
1601 for_each_netdev(net, dev) {
1602 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001603 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001604
Eric W. Biederman881d9662007-09-17 11:56:21 -07001605 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001606 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1607 dev);
1608 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001609 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001610 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001611 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001612 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001613
RongQing.Li8f891482011-11-30 23:43:07 -05001614outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001615 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001616 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001618EXPORT_SYMBOL(register_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619
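/* Illustrative sketch of a minimal client: because of the replay described
 * above, example_netdev_event() also sees NETDEV_REGISTER (and NETDEV_UP
 * where applicable) for devices that existed before registration. All
 * "example_" names are hypothetical.
 */
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_UP:
                pr_info("%s: up\n", dev->name);
                break;
        case NETDEV_DOWN:
                pr_info("%s: down\n", dev->name);
                break;
        }

        return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
        .notifier_call = example_netdev_event,
};

/* Typically paired: register_netdevice_notifier(&example_netdev_nb) in
 * module init, unregister_netdevice_notifier(&example_netdev_nb) in exit.
 */
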
1620/**
tcharding722c9a02017-02-09 17:56:04 +11001621 * unregister_netdevice_notifier - unregister a network notifier block
1622 * @nb: notifier
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 *
tcharding722c9a02017-02-09 17:56:04 +11001624 * Unregister a notifier previously registered by
1625 * register_netdevice_notifier(). The notifier is unlinked from the
1626 * kernel structures and may then be reused. A negative errno code
1627 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001628 *
tcharding722c9a02017-02-09 17:56:04 +11001629 * After unregistering, unregister and down device events are synthesized
1630 * for all devices on the device list to the removed notifier to remove
1631 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 */
1633
1634int unregister_netdevice_notifier(struct notifier_block *nb)
1635{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001636 struct net_device *dev;
1637 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001638 int err;
1639
1640 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001641 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001642 if (err)
1643 goto unlock;
1644
1645 for_each_net(net) {
1646 for_each_netdev(net, dev) {
1647 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001648 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1649 dev);
1650 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001651 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001652 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001653 }
1654 }
1655unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001656 rtnl_unlock();
1657 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001659EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660
1661/**
Jiri Pirko351638e2013-05-28 01:30:21 +00001662 * call_netdevice_notifiers_info - call all network notifier blocks
1663 * @val: value passed unmodified to notifier function
1664 * @dev: net_device pointer passed unmodified to notifier function
1665 * @info: notifier information data
1666 *
1667 * Call all network notifier blocks. Parameters and return value
1668 * are as for raw_notifier_call_chain().
1669 */
1670
stephen hemminger1d143d92013-12-29 14:01:29 -08001671static int call_netdevice_notifiers_info(unsigned long val,
1672 struct net_device *dev,
1673 struct netdev_notifier_info *info)
Jiri Pirko351638e2013-05-28 01:30:21 +00001674{
1675 ASSERT_RTNL();
1676 netdev_notifier_info_init(info, dev);
1677 return raw_notifier_call_chain(&netdev_chain, val, info);
1678}
Jiri Pirko351638e2013-05-28 01:30:21 +00001679
1680/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 * call_netdevice_notifiers - call all network notifier blocks
1682 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001683 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 *
1685 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001686 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 */
1688
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001689int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690{
Jiri Pirko351638e2013-05-28 01:30:21 +00001691 struct netdev_notifier_info info;
1692
1693 return call_netdevice_notifiers_info(val, dev, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001695EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696
Pablo Neira1cf519002015-05-13 18:19:37 +02001697#ifdef CONFIG_NET_INGRESS
Daniel Borkmann45771392015-04-10 23:07:54 +02001698static struct static_key ingress_needed __read_mostly;
1699
1700void net_inc_ingress_queue(void)
1701{
1702 static_key_slow_inc(&ingress_needed);
1703}
1704EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1705
1706void net_dec_ingress_queue(void)
1707{
1708 static_key_slow_dec(&ingress_needed);
1709}
1710EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1711#endif
1712
Daniel Borkmann1f211a12016-01-07 22:29:47 +01001713#ifdef CONFIG_NET_EGRESS
1714static struct static_key egress_needed __read_mostly;
1715
1716void net_inc_egress_queue(void)
1717{
1718 static_key_slow_inc(&egress_needed);
1719}
1720EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1721
1722void net_dec_egress_queue(void)
1723{
1724 static_key_slow_dec(&egress_needed);
1725}
1726EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1727#endif
1728
Ingo Molnarc5905af2012-02-24 08:31:31 +01001729static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001730#ifdef HAVE_JUMP_LABEL
Eric Dumazetb90e5792011-11-28 11:16:50 +00001731static atomic_t netstamp_needed_deferred;
Eric Dumazet13baa002017-03-01 14:28:39 -08001732static atomic_t netstamp_wanted;
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001733static void netstamp_clear(struct work_struct *work)
1734{
1735 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
Eric Dumazet13baa002017-03-01 14:28:39 -08001736 int wanted;
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001737
Eric Dumazet13baa002017-03-01 14:28:39 -08001738 wanted = atomic_add_return(deferred, &netstamp_wanted);
1739 if (wanted > 0)
1740 static_key_enable(&netstamp_needed);
1741 else
1742 static_key_disable(&netstamp_needed);
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001743}
1744static DECLARE_WORK(netstamp_work, netstamp_clear);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001745#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746
1747void net_enable_timestamp(void)
1748{
Eric Dumazet13baa002017-03-01 14:28:39 -08001749#ifdef HAVE_JUMP_LABEL
1750 int wanted;
1751
1752 while (1) {
1753 wanted = atomic_read(&netstamp_wanted);
1754 if (wanted <= 0)
1755 break;
1756 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
1757 return;
1758 }
1759 atomic_inc(&netstamp_needed_deferred);
1760 schedule_work(&netstamp_work);
1761#else
Ingo Molnarc5905af2012-02-24 08:31:31 +01001762 static_key_slow_inc(&netstamp_needed);
Eric Dumazet13baa002017-03-01 14:28:39 -08001763#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001765EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766
1767void net_disable_timestamp(void)
1768{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001769#ifdef HAVE_JUMP_LABEL
Eric Dumazet13baa002017-03-01 14:28:39 -08001770 int wanted;
1771
1772 while (1) {
1773 wanted = atomic_read(&netstamp_wanted);
1774 if (wanted <= 1)
1775 break;
1776 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
1777 return;
1778 }
1779 atomic_dec(&netstamp_needed_deferred);
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001780 schedule_work(&netstamp_work);
1781#else
Ingo Molnarc5905af2012-02-24 08:31:31 +01001782 static_key_slow_dec(&netstamp_needed);
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08001783#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001785EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786
Eric Dumazet3b098e22010-05-15 23:57:10 -07001787static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788{
Thomas Gleixner2456e852016-12-25 11:38:40 +01001789 skb->tstamp = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001790 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001791 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792}
1793
Eric Dumazet588f0332011-11-15 04:12:55 +00001794#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001795 if (static_key_false(&netstamp_needed)) { \
Thomas Gleixner2456e852016-12-25 11:38:40 +01001796 if ((COND) && !(SKB)->tstamp) \
Eric Dumazet588f0332011-11-15 04:12:55 +00001797 __net_timestamp(SKB); \
1798 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001799
Nikolay Aleksandrovf4b05d22016-04-28 17:59:28 +02001800bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001801{
1802 unsigned int len;
1803
1804 if (!(dev->flags & IFF_UP))
1805 return false;
1806
1807 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1808 if (skb->len <= len)
1809 return true;
1810
1811 /* if TSO is enabled, we don't care about the length as the packet
1812 * could be forwarded without being segmented first
1813 */
1814 if (skb_is_gso(skb))
1815 return true;
1816
1817 return false;
1818}
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04001819EXPORT_SYMBOL_GPL(is_skb_forwardable);
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001820
Herbert Xua0265d22014-04-17 13:45:03 +08001821int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1822{
Martin KaFai Lau4e3264d2016-11-09 15:36:33 -08001823 int ret = ____dev_forward_skb(dev, skb);
1824
1825 if (likely(!ret)) {
1826 skb->protocol = eth_type_trans(skb, dev);
1827 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
Herbert Xua0265d22014-04-17 13:45:03 +08001828 }
1829
Martin KaFai Lau4e3264d2016-11-09 15:36:33 -08001830 return ret;
Herbert Xua0265d22014-04-17 13:45:03 +08001831}
1832EXPORT_SYMBOL_GPL(__dev_forward_skb);
1833
Arnd Bergmann44540962009-11-26 06:07:08 +00001834/**
1835 * dev_forward_skb - loopback an skb to another netif
1836 *
1837 * @dev: destination network device
1838 * @skb: buffer to forward
1839 *
1840 * return values:
1841 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001842 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001843 *
1844 * dev_forward_skb can be used for injecting an skb from the
1845 * start_xmit function of one device into the receive queue
1846 * of another device.
1847 *
1848 * The receiving device may be in another namespace, so
1849 * we have to clear all information in the skb that could
1850 * impact namespace isolation.
1851 */
1852int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1853{
Herbert Xua0265d22014-04-17 13:45:03 +08001854 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001855}
1856EXPORT_SYMBOL_GPL(dev_forward_skb);
1857
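/* Illustrative sketch: the veth-style use case, an ndo_start_xmit that
 * injects the skb into a peer's receive path. "struct example_priv" and
 * its "peer" member are hypothetical.
 */
struct example_priv {
        struct net_device *peer;
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);

        /* On NET_RX_DROP the skb has already been freed for us. */
        if (dev_forward_skb(priv->peer, skb) != NET_RX_SUCCESS)
                dev->stats.tx_dropped++;

        return NETDEV_TX_OK;
}
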
Changli Gao71d9dec2010-12-15 19:57:25 +00001858static inline int deliver_skb(struct sk_buff *skb,
1859 struct packet_type *pt_prev,
1860 struct net_device *orig_dev)
1861{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001862 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1863 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001864 atomic_inc(&skb->users);
1865 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1866}
1867
Salam Noureddine7866a622015-01-27 11:35:48 -08001868static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1869 struct packet_type **pt,
Jiri Pirkofbcb2172015-03-30 16:56:01 +02001870 struct net_device *orig_dev,
1871 __be16 type,
Salam Noureddine7866a622015-01-27 11:35:48 -08001872 struct list_head *ptype_list)
1873{
1874 struct packet_type *ptype, *pt_prev = *pt;
1875
1876 list_for_each_entry_rcu(ptype, ptype_list, list) {
1877 if (ptype->type != type)
1878 continue;
1879 if (pt_prev)
Jiri Pirkofbcb2172015-03-30 16:56:01 +02001880 deliver_skb(skb, pt_prev, orig_dev);
Salam Noureddine7866a622015-01-27 11:35:48 -08001881 pt_prev = ptype;
1882 }
1883 *pt = pt_prev;
1884}
1885
Eric Leblondc0de08d2012-08-16 22:02:58 +00001886static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1887{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001888 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001889 return false;
1890
1891 if (ptype->id_match)
1892 return ptype->id_match(ptype, skb->sk);
1893 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1894 return true;
1895
1896 return false;
1897}
1898
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899/*
1900 * Support routine. Sends outgoing frames to any network
1901 * taps currently in use.
1902 */
1903
David Ahern74b20582016-05-10 11:19:50 -07001904void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905{
1906 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001907 struct sk_buff *skb2 = NULL;
1908 struct packet_type *pt_prev = NULL;
Salam Noureddine7866a622015-01-27 11:35:48 -08001909 struct list_head *ptype_list = &ptype_all;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001910
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 rcu_read_lock();
Salam Noureddine7866a622015-01-27 11:35:48 -08001912again:
1913 list_for_each_entry_rcu(ptype, ptype_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 /* Never send packets back to the socket
1915 * they originated from - MvS (miquels@drinkel.ow.org)
1916 */
Salam Noureddine7866a622015-01-27 11:35:48 -08001917 if (skb_loop_sk(ptype, skb))
1918 continue;
Changli Gao71d9dec2010-12-15 19:57:25 +00001919
Salam Noureddine7866a622015-01-27 11:35:48 -08001920 if (pt_prev) {
1921 deliver_skb(skb2, pt_prev, skb->dev);
Changli Gao71d9dec2010-12-15 19:57:25 +00001922 pt_prev = ptype;
Salam Noureddine7866a622015-01-27 11:35:48 -08001923 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 }
Salam Noureddine7866a622015-01-27 11:35:48 -08001925
1926 /* need to clone skb, done only once */
1927 skb2 = skb_clone(skb, GFP_ATOMIC);
1928 if (!skb2)
1929 goto out_unlock;
1930
1931 net_timestamp_set(skb2);
1932
1933 /* The network header should be correctly
1934 * set by the sender, so the second statement is
1935 * just protection against buggy protocols.
1936 */
1937 skb_reset_mac_header(skb2);
1938
1939 if (skb_network_header(skb2) < skb2->data ||
1940 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1941 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1942 ntohs(skb2->protocol),
1943 dev->name);
1944 skb_reset_network_header(skb2);
1945 }
1946
1947 skb2->transport_header = skb2->network_header;
1948 skb2->pkt_type = PACKET_OUTGOING;
1949 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 }
Salam Noureddine7866a622015-01-27 11:35:48 -08001951
1952 if (ptype_list == &ptype_all) {
1953 ptype_list = &dev->ptype_all;
1954 goto again;
1955 }
1956out_unlock:
Changli Gao71d9dec2010-12-15 19:57:25 +00001957 if (pt_prev)
1958 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 rcu_read_unlock();
1960}
David Ahern74b20582016-05-10 11:19:50 -07001961EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962
Ben Hutchings2c530402012-07-10 10:55:09 +00001963/**
1964 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001965 * @dev: Network device
1966 * @txq: number of queues available
1967 *
1968 * If real_num_tx_queues is changed, the tc mappings may no longer be
1969 * valid. To resolve this, verify the tc mapping remains valid and if
1970 * not, NULL the mapping. With no priorities mapping to this
1971 * offset/count pair, it will no longer be used. In the worst case, if
1972 * TC0 is invalid, nothing can be done, so disable priority mappings. It
1973 * is expected that drivers will fix this mapping if they can before
1974 * calling netif_set_real_num_tx_queues.
1975 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001976static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001977{
1978 int i;
1979 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1980
1981 /* If TC0 is invalidated disable TC mapping */
1982 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001983 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001984 dev->num_tc = 0;
1985 return;
1986 }
1987
1988 /* Invalidated prio to tc mappings set to TC0 */
1989 for (i = 1; i < TC_BITMASK + 1; i++) {
1990 int q = netdev_get_prio_tc_map(dev, i);
1991
1992 tc = &dev->tc_to_txq[q];
1993 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001994 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1995 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001996 netdev_set_prio_tc_map(dev, i, 0);
1997 }
1998 }
1999}
2000
Alexander Duyck8d059b02016-10-28 11:43:49 -04002001int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2002{
2003 if (dev->num_tc) {
2004 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2005 int i;
2006
2007 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2008 if ((txq - tc->offset) < tc->count)
2009 return i;
2010 }
2011
2012 return -1;
2013 }
2014
2015 return 0;
2016}
2017
Alexander Duyck537c00d2013-01-10 08:57:02 +00002018#ifdef CONFIG_XPS
2019static DEFINE_MUTEX(xps_map_mutex);
2020#define xmap_dereference(P) \
2021 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2022
Alexander Duyck6234f872016-10-28 11:46:49 -04002023static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2024 int tci, u16 index)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002025{
2026 struct xps_map *map = NULL;
2027 int pos;
2028
2029 if (dev_maps)
Alexander Duyck6234f872016-10-28 11:46:49 -04002030 map = xmap_dereference(dev_maps->cpu_map[tci]);
2031 if (!map)
2032 return false;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002033
Alexander Duyck6234f872016-10-28 11:46:49 -04002034 for (pos = map->len; pos--;) {
2035 if (map->queues[pos] != index)
2036 continue;
2037
2038 if (map->len > 1) {
2039 map->queues[pos] = map->queues[--map->len];
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002040 break;
2041 }
Alexander Duyck6234f872016-10-28 11:46:49 -04002042
2043 RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
2044 kfree_rcu(map, rcu);
2045 return false;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002046 }
2047
Alexander Duyck6234f872016-10-28 11:46:49 -04002048 return true;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002049}
2050
Alexander Duyck6234f872016-10-28 11:46:49 -04002051static bool remove_xps_queue_cpu(struct net_device *dev,
2052 struct xps_dev_maps *dev_maps,
2053 int cpu, u16 offset, u16 count)
2054{
Alexander Duyck184c4492016-10-28 11:50:13 -04002055 int num_tc = dev->num_tc ? : 1;
2056 bool active = false;
2057 int tci;
Alexander Duyck6234f872016-10-28 11:46:49 -04002058
Alexander Duyck184c4492016-10-28 11:50:13 -04002059 for (tci = cpu * num_tc; num_tc--; tci++) {
2060 int i, j;
2061
2062 for (i = count, j = offset; i--; j++) {
2063 if (!remove_xps_queue(dev_maps, tci, j))
2064 break;
2065 }
2066
2067 active |= i < 0;
Alexander Duyck6234f872016-10-28 11:46:49 -04002068 }
2069
Alexander Duyck184c4492016-10-28 11:50:13 -04002070 return active;
Alexander Duyck6234f872016-10-28 11:46:49 -04002071}
2072
2073static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2074 u16 count)
Alexander Duyck537c00d2013-01-10 08:57:02 +00002075{
2076 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00002077 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002078 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002079
2080 mutex_lock(&xps_map_mutex);
2081 dev_maps = xmap_dereference(dev->xps_maps);
2082
2083 if (!dev_maps)
2084 goto out_no_maps;
2085
Alexander Duyck6234f872016-10-28 11:46:49 -04002086 for_each_possible_cpu(cpu)
2087 active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
2088 offset, count);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002089
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002090 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00002091 RCU_INIT_POINTER(dev->xps_maps, NULL);
2092 kfree_rcu(dev_maps, rcu);
2093 }
2094
Alexander Duyck6234f872016-10-28 11:46:49 -04002095 for (i = offset + (count - 1); count--; i--)
Alexander Duyck024e9672013-01-10 08:57:46 +00002096 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
2097 NUMA_NO_NODE);
2098
Alexander Duyck537c00d2013-01-10 08:57:02 +00002099out_no_maps:
2100 mutex_unlock(&xps_map_mutex);
2101}
2102
Alexander Duyck6234f872016-10-28 11:46:49 -04002103static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2104{
2105 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2106}
2107
Alexander Duyck01c5f862013-01-10 08:57:35 +00002108static struct xps_map *expand_xps_map(struct xps_map *map,
2109 int cpu, u16 index)
2110{
2111 struct xps_map *new_map;
2112 int alloc_len = XPS_MIN_MAP_ALLOC;
2113 int i, pos;
2114
2115 for (pos = 0; map && pos < map->len; pos++) {
2116 if (map->queues[pos] != index)
2117 continue;
2118 return map;
2119 }
2120
2121 /* Need to add queue to this CPU's existing map */
2122 if (map) {
2123 if (pos < map->alloc_len)
2124 return map;
2125
2126 alloc_len = map->alloc_len * 2;
2127 }
2128
2129 /* Need to allocate new map to store queue on this CPU's map */
2130 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2131 cpu_to_node(cpu));
2132 if (!new_map)
2133 return NULL;
2134
2135 for (i = 0; i < pos; i++)
2136 new_map->queues[i] = map->queues[i];
2137 new_map->alloc_len = alloc_len;
2138 new_map->len = pos;
2139
2140 return new_map;
2141}
2142
Michael S. Tsirkin35735402013-10-02 09:14:06 +03002143int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2144 u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00002145{
Alexander Duyck01c5f862013-01-10 08:57:35 +00002146 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck184c4492016-10-28 11:50:13 -04002147 int i, cpu, tci, numa_node_id = -2;
2148 int maps_sz, num_tc = 1, tc = 0;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002149 struct xps_map *map, *new_map;
Alexander Duyck01c5f862013-01-10 08:57:35 +00002150 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002151
Alexander Duyck184c4492016-10-28 11:50:13 -04002152 if (dev->num_tc) {
2153 num_tc = dev->num_tc;
2154 tc = netdev_txq_to_tc(dev, index);
2155 if (tc < 0)
2156 return -EINVAL;
2157 }
2158
2159 maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
2160 if (maps_sz < L1_CACHE_BYTES)
2161 maps_sz = L1_CACHE_BYTES;
2162
Alexander Duyck537c00d2013-01-10 08:57:02 +00002163 mutex_lock(&xps_map_mutex);
2164
2165 dev_maps = xmap_dereference(dev->xps_maps);
2166
Alexander Duyck01c5f862013-01-10 08:57:35 +00002167 /* allocate memory for queue storage */
Alexander Duyck184c4492016-10-28 11:50:13 -04002168 for_each_cpu_and(cpu, cpu_online_mask, mask) {
Alexander Duyck01c5f862013-01-10 08:57:35 +00002169 if (!new_dev_maps)
2170 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002171 if (!new_dev_maps) {
2172 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002173 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002174 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002175
Alexander Duyck184c4492016-10-28 11:50:13 -04002176 tci = cpu * num_tc + tc;
2177 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
Alexander Duyck01c5f862013-01-10 08:57:35 +00002178 NULL;
2179
2180 map = expand_xps_map(map, cpu, index);
2181 if (!map)
2182 goto error;
2183
Alexander Duyck184c4492016-10-28 11:50:13 -04002184 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002185 }
2186
2187 if (!new_dev_maps)
2188 goto out_no_new_maps;
2189
2190 for_each_possible_cpu(cpu) {
Alexander Duyck184c4492016-10-28 11:50:13 -04002191 /* copy maps belonging to foreign traffic classes */
2192 for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
2193 /* fill in the new device map from the old device map */
2194 map = xmap_dereference(dev_maps->cpu_map[tci]);
2195 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2196 }
2197
2198 /* We need to explicitly update tci as the previous loop
2199 * could break out early if dev_maps is NULL.
2200 */
2201 tci = cpu * num_tc + tc;
2202
Alexander Duyck01c5f862013-01-10 08:57:35 +00002203 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2204 /* add queue to CPU maps */
2205 int pos = 0;
2206
Alexander Duyck184c4492016-10-28 11:50:13 -04002207 map = xmap_dereference(new_dev_maps->cpu_map[tci]);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002208 while ((pos < map->len) && (map->queues[pos] != index))
2209 pos++;
2210
2211 if (pos == map->len)
2212 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002213#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00002214 if (numa_node_id == -2)
2215 numa_node_id = cpu_to_node(cpu);
2216 else if (numa_node_id != cpu_to_node(cpu))
2217 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002218#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00002219 } else if (dev_maps) {
2220 /* fill in the new device map from the old device map */
Alexander Duyck184c4492016-10-28 11:50:13 -04002221 map = xmap_dereference(dev_maps->cpu_map[tci]);
2222 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002223 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002224
Alexander Duyck184c4492016-10-28 11:50:13 -04002225 /* copy maps belonging to foreign traffic classes */
2226 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2227 /* fill in the new device map from the old device map */
2228 map = xmap_dereference(dev_maps->cpu_map[tci]);
2229 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2230 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002231 }
2232
Alexander Duyck01c5f862013-01-10 08:57:35 +00002233 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2234
Alexander Duyck537c00d2013-01-10 08:57:02 +00002235 /* Cleanup old maps */
Alexander Duyck184c4492016-10-28 11:50:13 -04002236 if (!dev_maps)
2237 goto out_no_old_maps;
2238
2239 for_each_possible_cpu(cpu) {
2240 for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
2241 new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2242 map = xmap_dereference(dev_maps->cpu_map[tci]);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002243 if (map && map != new_map)
2244 kfree_rcu(map, rcu);
2245 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002246 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002247
Alexander Duyck184c4492016-10-28 11:50:13 -04002248 kfree_rcu(dev_maps, rcu);
2249
2250out_no_old_maps:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002251 dev_maps = new_dev_maps;
2252 active = true;
2253
2254out_no_new_maps:
2255 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00002256 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2257 (numa_node_id >= 0) ? numa_node_id :
2258 NUMA_NO_NODE);
2259
Alexander Duyck01c5f862013-01-10 08:57:35 +00002260 if (!dev_maps)
2261 goto out_no_maps;
2262
2263 /* removes queue from unused CPUs */
2264 for_each_possible_cpu(cpu) {
Alexander Duyck184c4492016-10-28 11:50:13 -04002265 for (i = tc, tci = cpu * num_tc; i--; tci++)
2266 active |= remove_xps_queue(dev_maps, tci, index);
2267 if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
2268 active |= remove_xps_queue(dev_maps, tci, index);
2269 for (i = num_tc - tc, tci++; --i; tci++)
2270 active |= remove_xps_queue(dev_maps, tci, index);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002271 }
2272
2273 /* free map if not active */
2274 if (!active) {
2275 RCU_INIT_POINTER(dev->xps_maps, NULL);
2276 kfree_rcu(dev_maps, rcu);
2277 }
2278
2279out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00002280 mutex_unlock(&xps_map_mutex);
2281
2282 return 0;
2283error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002284 /* remove any maps that we added */
2285 for_each_possible_cpu(cpu) {
Alexander Duyck184c4492016-10-28 11:50:13 -04002286 for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
2287 new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2288 map = dev_maps ?
2289 xmap_dereference(dev_maps->cpu_map[tci]) :
2290 NULL;
2291 if (new_map && new_map != map)
2292 kfree(new_map);
2293 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002294 }
2295
Alexander Duyck537c00d2013-01-10 08:57:02 +00002296 mutex_unlock(&xps_map_mutex);
2297
Alexander Duyck537c00d2013-01-10 08:57:02 +00002298 kfree(new_dev_maps);
2299 return -ENOMEM;
2300}
2301EXPORT_SYMBOL(netif_set_xps_queue);
2302
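/* Illustrative sketch: pinning one tx queue to one CPU, as a driver might
 * do when aligning queues with IRQ affinity. A real driver would derive
 * the mask from its interrupt layout; "qid" and "cpu" are parameters of
 * this hypothetical helper.
 */
static int example_pin_tx_queue(struct net_device *dev, u16 qid, int cpu)
{
        cpumask_var_t mask;
        int err;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_set_cpu(cpu, mask);
        err = netif_set_xps_queue(dev, mask, qid);
        free_cpumask_var(mask);

        return err;
}
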
2303#endif
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002304void netdev_reset_tc(struct net_device *dev)
2305{
Alexander Duyck6234f872016-10-28 11:46:49 -04002306#ifdef CONFIG_XPS
2307 netif_reset_xps_queues_gt(dev, 0);
2308#endif
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002309 dev->num_tc = 0;
2310 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2311 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2312}
2313EXPORT_SYMBOL(netdev_reset_tc);
2314
2315int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2316{
2317 if (tc >= dev->num_tc)
2318 return -EINVAL;
2319
Alexander Duyck6234f872016-10-28 11:46:49 -04002320#ifdef CONFIG_XPS
2321 netif_reset_xps_queues(dev, offset, count);
2322#endif
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002323 dev->tc_to_txq[tc].count = count;
2324 dev->tc_to_txq[tc].offset = offset;
2325 return 0;
2326}
2327EXPORT_SYMBOL(netdev_set_tc_queue);
2328
2329int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2330{
2331 if (num_tc > TC_MAX_QUEUE)
2332 return -EINVAL;
2333
Alexander Duyck6234f872016-10-28 11:46:49 -04002334#ifdef CONFIG_XPS
2335 netif_reset_xps_queues_gt(dev, 0);
2336#endif
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002337 dev->num_tc = num_tc;
2338 return 0;
2339}
2340EXPORT_SYMBOL(netdev_set_num_tc);
2341
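/* Illustrative sketch: carving eight tx queues into two traffic classes
 * (TC0 -> queues 0-3, TC1 -> queues 4-7) in a hypothetical driver setup
 * path.
 */
static int example_setup_two_tcs(struct net_device *dev)
{
        int err;

        err = netdev_set_num_tc(dev, 2);
        if (err)
                return err;

        err = netdev_set_tc_queue(dev, 0, 4, 0); /* TC0: 4 queues at offset 0 */
        if (!err)
                err = netdev_set_tc_queue(dev, 1, 4, 4); /* TC1: 4 queues at offset 4 */
        if (err)
                netdev_reset_tc(dev);

        return err;
}
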
John Fastabendf0796d52010-07-01 13:21:57 +00002342/*
2343 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2344 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2345 */
Tom Herberte6484932010-10-18 18:04:39 +00002346int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002347{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002348 int rc;
2349
Tom Herberte6484932010-10-18 18:04:39 +00002350 if (txq < 1 || txq > dev->num_tx_queues)
2351 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002352
Ben Hutchings5c565802011-02-15 19:39:21 +00002353 if (dev->reg_state == NETREG_REGISTERED ||
2354 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002355 ASSERT_RTNL();
2356
Tom Herbert1d24eb42010-11-21 13:17:27 +00002357 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2358 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002359 if (rc)
2360 return rc;
2361
John Fastabend4f57c082011-01-17 08:06:04 +00002362 if (dev->num_tc)
2363 netif_setup_tc(dev, txq);
2364
Alexander Duyck024e9672013-01-10 08:57:46 +00002365 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002366 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002367#ifdef CONFIG_XPS
2368 netif_reset_xps_queues_gt(dev, txq);
2369#endif
2370 }
John Fastabendf0796d52010-07-01 13:21:57 +00002371 }
Tom Herberte6484932010-10-18 18:04:39 +00002372
2373 dev->real_num_tx_queues = txq;
2374 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002375}
2376EXPORT_SYMBOL(netif_set_real_num_tx_queues);
Denis Vlasenko56079432006-03-29 15:57:29 -08002377
Michael Daltona953be52014-01-16 22:23:28 -08002378#ifdef CONFIG_SYSFS
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002379/**
2380 * netif_set_real_num_rx_queues - set actual number of RX queues used
2381 * @dev: Network device
2382 * @rxq: Actual number of RX queues
2383 *
2384 * This must be called either with the rtnl_lock held or before
2385 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002386 * negative error code. If called before registration, it always
2387 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002388 */
2389int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2390{
2391 int rc;
2392
Tom Herbertbd25fa72010-10-18 18:00:16 +00002393 if (rxq < 1 || rxq > dev->num_rx_queues)
2394 return -EINVAL;
2395
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002396 if (dev->reg_state == NETREG_REGISTERED) {
2397 ASSERT_RTNL();
2398
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002399 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2400 rxq);
2401 if (rc)
2402 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002403 }
2404
2405 dev->real_num_rx_queues = rxq;
2406 return 0;
2407}
2408EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2409#endif
2410
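/* Illustrative sketch: shrinking both queue sets to the number of MSI-X
 * vectors a hypothetical driver actually obtained ("nvec"). Once the
 * device is registered, both setters below must run under RTNL.
 */
static int example_resize_queues(struct net_device *dev, unsigned int nvec)
{
        int err;

        ASSERT_RTNL();

        err = netif_set_real_num_tx_queues(dev, nvec);
        if (err)
                return err;

        return netif_set_real_num_rx_queues(dev, nvec);
}
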
Ben Hutchings2c530402012-07-10 10:55:09 +00002411/**
2412 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002413 *
2414 * This routine returns an upper limit on the number of RSS queues
2415 * that multiqueue devices should use by default.
2416 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002417int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002418{
Hariprasad Shenai40e4e712016-06-08 18:09:08 +05302419 return is_kdump_kernel() ?
2420 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
Yuval Mintz16917b82012-07-01 03:18:50 +00002421}
2422EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2423
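/* Illustrative sketch of the usual driver pattern: cap the RSS queue count
 * at both the hardware limit ("hw_max", hypothetical) and the default
 * recommendation returned above.
 */
static unsigned int example_pick_rss_queues(unsigned int hw_max)
{
        return min_t(unsigned int, hw_max,
                     netif_get_num_default_rss_queues());
}
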
Eric Dumazet3bcb8462016-06-04 20:02:28 -07002424static void __netif_reschedule(struct Qdisc *q)
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002425{
2426 struct softnet_data *sd;
2427 unsigned long flags;
2428
2429 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05002430 sd = this_cpu_ptr(&softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002431 q->next_sched = NULL;
2432 *sd->output_queue_tailp = q;
2433 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002434 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2435 local_irq_restore(flags);
2436}
2437
David S. Miller37437bb2008-07-16 02:15:04 -07002438void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002439{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002440 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2441 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002442}
2443EXPORT_SYMBOL(__netif_schedule);
2444
Eric Dumazete6247022013-12-05 04:45:08 -08002445struct dev_kfree_skb_cb {
2446 enum skb_free_reason reason;
2447};
2448
2449static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002450{
Eric Dumazete6247022013-12-05 04:45:08 -08002451 return (struct dev_kfree_skb_cb *)skb->cb;
Denis Vlasenko56079432006-03-29 15:57:29 -08002452}
Denis Vlasenko56079432006-03-29 15:57:29 -08002453
John Fastabend46e5da40a2014-09-12 20:04:52 -07002454void netif_schedule_queue(struct netdev_queue *txq)
2455{
2456 rcu_read_lock();
2457 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2458 struct Qdisc *q = rcu_dereference(txq->qdisc);
2459
2460 __netif_schedule(q);
2461 }
2462 rcu_read_unlock();
2463}
2464EXPORT_SYMBOL(netif_schedule_queue);
2465
John Fastabend46e5da40a2014-09-12 20:04:52 -07002466void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2467{
2468 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2469 struct Qdisc *q;
2470
2471 rcu_read_lock();
2472 q = rcu_dereference(dev_queue->qdisc);
2473 __netif_schedule(q);
2474 rcu_read_unlock();
2475 }
2476}
2477EXPORT_SYMBOL(netif_tx_wake_queue);

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(atomic_read(&skb->users) == 1)) {
		smp_rmb();
		atomic_set(&skb->users, 0);
	} else if (likely(!atomic_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
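
/*
 * Example (illustrative sketch, not part of this file): freeing an skb
 * from a path that may run in hardirq, softirq or process context, so
 * plain dev_kfree_skb() cannot be used. The dev_kfree_skb_any() and
 * dev_consume_skb_any() wrappers expand to calls like the one below.
 *
 *	static void example_drop(struct sk_buff *skb)
 *	{
 *		__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
 *	}
 */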


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from the system and therefore no longer
 * available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
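
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver pairing the two calls across suspend/resume so the stack stops
 * handing it packets while the hardware is powered down.
 *
 *	static int example_suspend(struct device *d)
 *	{
 *		struct net_device *ndev = dev_get_drvdata(d);
 *
 *		netif_device_detach(ndev);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct device *d)
 *	{
 *		struct net_device *ndev = dev_get_drvdata(d);
 *
 *		netif_device_attach(ndev);
 *		return 0;
 *	}
 */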

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
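
/*
 * Example (illustrative sketch, not part of this file): a driver's
 * ndo_select_queue() implementation that reuses the stack's hash-based
 * distribution via the skb_tx_hash() wrapper, which feeds
 * dev->real_num_tx_queues into __skb_tx_hash().
 *
 *	static u16 example_select_queue(struct net_device *dev,
 *					struct sk_buff *skb,
 *					void *accel_priv,
 *					select_queue_fallback_t fallback)
 *	{
 *		return skb_tx_hash(dev, skb);
 *	}
 */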

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate the hardware checksum when a packet is to be mangled, and
 * complete the checksum manually on the outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity: the checksum could otherwise
	 * be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

int skb_crc32c_csum_help(struct sk_buff *skb)
{
	__le32 crc32c_csum;
	int ret = 0, offset, start;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	if (unlikely(skb_is_gso(skb)))
		goto out;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity: the checksum could otherwise
	 * be wrong.
	 */
	if (unlikely(skb_has_shared_frag(skb))) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}
	start = skb_checksum_start_offset(skb);
	offset = start + offsetof(struct sctphdr, checksum);
	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
		ret = -EINVAL;
		goto out;
	}
	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__le32))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}
	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
						  skb->len - start, ~(__u32)0,
						  crc32c_csum_stub));
	*(__le32 *)(skb->data + offset) = crc32c_csum;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;
out:
	return ret;
}

__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel GSO handlers can set the protocol to Ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
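
/*
 * Example (illustrative sketch, not part of this file): how a protocol
 * makes itself visible to the offload_base walk above by registering a
 * packet_offload with a gso_segment callback. EXAMPLE_ETHERTYPE and
 * example_gso_segment() are assumptions for illustration.
 *
 *	static struct packet_offload example_offload __read_mostly = {
 *		.type = cpu_to_be16(EXAMPLE_ETHERTYPE),
 *		.callbacks = {
 *			.gso_segment = example_gso_segment,
 *		},
 *	};
 *
 *	static int __init example_offload_init(void)
 *	{
 *		dev_add_offload(&example_offload);
 *		return 0;
 *	}
 */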


/* Open vSwitch calls this on the rx path, so we need a different check. */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
		       skb->ip_summed != CHECKSUM_NONE;

	return skb->ip_summed == CHECKSUM_NONE;
}

/**
 * __skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @tx_path: whether it is called in TX path
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation. This is
 * only possible when GSO is used for verifying header integrity.
 *
 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (unlikely(skb_needs_check(skb, tx_path)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
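
/*
 * Example (illustrative sketch, not part of this file): segmenting a GSO
 * skb through the skb_gso_segment() wrapper (tx_path == true) and walking
 * the resulting list. example_xmit_one() is an assumption for
 * illustration; note that NULL means no segmentation was required.
 *
 *	static int example_segment_and_send(struct sk_buff *skb,
 *					    netdev_features_t features)
 *	{
 *		struct sk_buff *segs, *next;
 *
 *		segs = skb_gso_segment(skb, features);
 *		if (IS_ERR(segs))
 *			return PTR_ERR(segs);
 *		if (!segs)
 *			return example_xmit_one(skb);
 *
 *		consume_skb(skb);
 *		while (segs) {
 *			next = segs->next;
 *			segs->next = NULL;
 *			example_xmit_one(segs);
 *			segs = next;
 *		}
 *		return 0;
 *	}
 */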

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. An IOMMU is present and allows mapping of all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));

			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

/* If this is an MPLS offload request, verify we are testing hardware
 * MPLS features instead of the standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
	if (illegal_highdma(skb->dev, skb))
		features &= ~NETIF_F_SG;

	return features;
}

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);

static netdev_features_t dflt_features_check(const struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}

static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs)
		return features & ~NETIF_F_GSO_MASK;

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If this is an encapsulation offload request, verify we are
	 * testing hardware encapsulation features instead of the
	 * standard features for the netdev.
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
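
/*
 * Example (illustrative sketch, not part of this file): querying the
 * per-skb feature set before falling back to software checksumming,
 * mirroring what validate_xmit_skb() does further down.
 *
 *	static int example_fixup_csum(struct sk_buff *skb)
 *	{
 *		netdev_features_t features = netif_skb_features(skb);
 *
 *		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *		    !(features & NETIF_F_CSUM_MASK))
 *			return skb_checksum_help(skb);
 *		return 0;
 *	}
 */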

static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}

struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}

static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}

int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{
	if (unlikely(skb->csum_not_inet))
		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
			skb_crc32c_csum_help(skb);

	return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);

static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		if (validate_xmit_xfrm(skb, features))
			goto out_kfree_skb;

		/* If the packet is not checksummed and the device does
		 * not support checksumming for this protocol, complete
		 * the checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (skb_csum_hwoffload_help(skb, features))
				goto out_kfree_skb;
		}
	}

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	atomic_long_inc(&dev->tx_dropped);
	return NULL;
}

struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* In case the skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If the skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);

static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get a more precise estimate of the bytes sent on the wire,
	 * we add the header size of all segments to pkt_len.
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
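
/*
 * Worked example for the estimate above (assumed numbers): a TCP GSO skb
 * with skb->len = 65214, 54 bytes of mac + IP + TCP headers and
 * gso_size = 1448 carries gso_segs = 45 segments. Only the first set of
 * headers is counted in skb->len, so
 *
 *	pkt_len = 65214 + (45 - 1) * 54 = 67590 bytes on the wire.
 */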

static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get the qdisc main lock.
	 * This permits the qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		__qdisc_drop(skb, &to_free);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(to_free))
		kfree_skb_list(to_free);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx =
			sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_SYMBOL(xmit_recursion);

/**
 * dev_loopback_xmit - loop back @skb
 * @net: network namespace this loopback is happening in
 * @sk: sk needed to be a netfilter okfn
 * @skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);

#ifdef CONFIG_NET_EGRESS
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
	struct tcf_result cl_res;

	if (!cl)
		return skb;

	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
	qdisc_bstats_cpu_update(cl->q, skb);

	switch (tcf_classify(skb, cl, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		qdisc_qstats_cpu_drop(cl->q);
		*ret = NET_XMIT_DROP;
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		*ret = NET_XMIT_SUCCESS;
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	default:
		break;
	}

	return skb;
}
#endif /* CONFIG_NET_EGRESS */

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		unsigned int tci = skb->sender_cpu - 1;

		if (dev->num_tc) {
			tci *= dev->num_tc;
			tci += netdev_get_prio_tc_map(dev, skb->priority);
		}

		map = rcu_dereference(dev_maps->cpu_map[tci]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
									   map->len)];
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	u32 sender_cpu = skb->sender_cpu - 1;

	if (sender_cpu >= (u32)NR_CPUS)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@accel_priv: private data used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_at_ingress = 0;
# ifdef CONFIG_NET_EGRESS
	if (static_key_false(&egress_needed)) {
		skb = sch_handle_egress(skb, &rc, dev);
		if (!skb)
			goto out;
	}
# endif
#endif
	/* If the device/qdisc doesn't need skb->dst, release it right now
	 * while it's hot in this cpu's cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	txq = netdev_pick_tx(dev, skb, accel_priv);
	q = rcu_dereference_bh(txq->qdisc);

	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here. (f.e. loopback and IP tunnels are clean, ignoring statistics
	 * counters.)
	 * However, it is possible that they rely on the protection
	 * made by us here.
	 *
	 * Check this and take the lock; it is not prone to deadlocks.
	 * Either way, the noqueue case is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {
			if (unlikely(__this_cpu_read(xmit_recursion) >
				     XMIT_RECURSION_LIMIT))
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev);
			if (!skb)
				goto out;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately.
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
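
/*
 * Example (illustrative sketch, not part of this file): a minimal sender
 * that builds a raw Ethernet frame on an assumed device and queues it
 * with dev_queue_xmit(), which consumes the skb regardless of the return
 * value. The interface name, the 64-byte zero payload and
 * EXAMPLE_ETHERTYPE are assumptions for illustration.
 *
 *	static int example_send_frame(struct net *net, const u8 *dst_mac)
 *	{
 *		struct net_device *dev = dev_get_by_name(net, "eth0");
 *		struct sk_buff *skb;
 *		int rc;
 *
 *		if (!dev)
 *			return -ENODEV;
 *		skb = alloc_skb(LL_RESERVED_SPACE(dev) + 64, GFP_ATOMIC);
 *		if (!skb) {
 *			dev_put(dev);
 *			return -ENOMEM;
 *		}
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *		skb_reset_network_header(skb);
 *		memset(skb_put(skb, 64), 0, 64);
 *		skb->dev = dev;
 *		skb->protocol = htons(EXAMPLE_ETHERTYPE);
 *		dev_hard_header(skb, dev, EXAMPLE_ETHERTYPE, dst_mac,
 *				dev->dev_addr, skb->len);
 *		rc = dev_queue_xmit(skb);
 *		dev_put(dev);
 *		return rc;
 *	}
 */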


/*************************************************************************
 *			Receiver routines
 *************************************************************************/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
unsigned int __read_mostly netdev_budget_usecs = 2000;
int weight_p __read_mostly = 64;           /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);

struct static_key rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key rfs_needed __read_mostly;
EXPORT_SYMBOL(rfs_needed);

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	const struct rps_sock_flow_table *sock_flow_table;
	struct netdev_rx_queue *rxqueue = dev->_rx;
	struct rps_dev_flow_table *flow_table;
	struct rps_map *map;
	int cpu = -1;
	u32 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue += index;
	}

	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	map = rcu_dereference(rxqueue->rps_map);
	if (!flow_table && !map)
		goto done;

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		struct rps_dev_flow *rflow;
		u32 next_cpu;
		u32 ident;

		/* First check into global flow table if there is a match */
		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
		if ((ident ^ hash) & ~rps_cpu_mask)
			goto try_rps;

		next_cpu = ident & rps_cpu_mask;

		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

try_rps:

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
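
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * implements ndo_rx_flow_steer() scanning its installed filters from a
 * periodic worker. struct example_filter and example_hw_remove_filter()
 * are assumptions for illustration.
 *
 *	static void example_expire_filters(struct example_adapter *ad)
 *	{
 *		struct example_filter *f, *tmp;
 *
 *		list_for_each_entry_safe(f, tmp, &ad->rfs_filters, list) {
 *			if (rps_may_expire_flow(ad->netdev, f->rxq_index,
 *						f->flow_id, f->filter_id)) {
 *				example_hw_remove_filter(ad, f);
 *				list_del(&f->list);
 *				kfree(f);
 *			}
 *		}
 *	}
 */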

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure belongs to another CPU.
 * If yes, queue it to our IPI list and return 1; if no, return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

Willem de Bruijn99bbc702013-05-20 04:02:32 +00003776#ifdef CONFIG_NET_FLOW_LIMIT
3777int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3778#endif
3779
3780static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3781{
3782#ifdef CONFIG_NET_FLOW_LIMIT
3783 struct sd_flow_limit *fl;
3784 struct softnet_data *sd;
3785 unsigned int old_flow, new_flow;
3786
3787 if (qlen < (netdev_max_backlog >> 1))
3788 return false;
3789
Christoph Lameter903ceff2014-08-17 12:30:35 -05003790 sd = this_cpu_ptr(&softnet_data);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003791
3792 rcu_read_lock();
3793 fl = rcu_dereference(sd->flow_limit);
3794 if (fl) {
Tom Herbert3958afa1b2013-12-15 22:12:06 -08003795 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003796 old_flow = fl->history[fl->history_head];
3797 fl->history[fl->history_head] = new_flow;
3798
3799 fl->history_head++;
3800 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3801
3802 if (likely(fl->buckets[old_flow]))
3803 fl->buckets[old_flow]--;
3804
3805 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3806 fl->count++;
3807 rcu_read_unlock();
3808 return true;
3809 }
3810 }
3811 rcu_read_unlock();
3812#endif
3813 return false;
3814}
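
/* Illustrative note: the flow limiter above is tuned and enabled from
 * user space; per Documentation/networking/scaling.txt, something like:
 *
 *	echo 4096 > /proc/sys/net/core/flow_limit_table_len
 *	echo f > /proc/sys/net/core/flow_limit_cpu_bitmap
 *
 * enables it on CPUs 0-3 (the bitmap value here is only an example).
 */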
3815
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003816/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003817 * enqueue_to_backlog is called to queue an skb on a per-CPU backlog
3818 * queue (which may be a remote CPU's queue).
3819 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003820static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3821 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003822{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003823 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003824 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003825 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003826
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003827 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003828
3829 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003830
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003831 rps_lock(sd);
Julian Anastasove9e4dd32015-07-09 09:59:09 +03003832 if (!netif_running(skb->dev))
3833 goto drop;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003834 qlen = skb_queue_len(&sd->input_pkt_queue);
3835 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Li RongQinge008f3f2014-12-08 09:42:55 +08003836 if (qlen) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003837enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003838 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003839 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003840 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003841 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003842 return NET_RX_SUCCESS;
3843 }
3844
Eric Dumazetebda37c22010-05-06 23:51:21 +00003845 /* Schedule NAPI for backlog device.
3846 * We can use a non-atomic operation since we own the queue lock.
3847 */
3848 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003849 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003850 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003851 }
3852 goto enqueue;
3853 }
3854
Julian Anastasove9e4dd32015-07-09 09:59:09 +03003855drop:
Changli Gaodee42872010-05-02 05:42:16 +00003856 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003857 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003858
Tom Herbert0a9627f2010-03-16 08:03:29 +00003859 local_irq_restore(flags);
3860
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003861 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003862 kfree_skb(skb);
3863 return NET_RX_DROP;
3864}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003865
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003866static int netif_rx_internal(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003868 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003869
Eric Dumazet588f0332011-11-15 04:12:55 +00003870 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871
Koki Sanagicf66ba52010-08-23 18:45:02 +09003872 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003873#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003874 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003875 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003876 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877
Changli Gaocece1942010-08-07 20:35:43 -07003878 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003879 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003880
3881 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003882 if (cpu < 0)
3883 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003884
3885 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3886
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003887 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003888 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003889 } else
3890#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003891 {
3892 unsigned int qtail;
tchardingf4563a72017-02-09 17:56:07 +11003893
Tom Herbertfec5e652010-04-16 16:01:27 -07003894 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3895 put_cpu();
3896 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003897 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003899
3900/**
3901 * netif_rx - post buffer to the network code
3902 * @skb: buffer to post
3903 *
3904 * This function receives a packet from a device driver and queues it for
3905 * the upper (protocol) levels to process. It always succeeds. The buffer
3906 * may be dropped during processing for congestion control or by the
3907 * protocol layers.
3908 *
3909 * return values:
3910 * NET_RX_SUCCESS (no congestion)
3911 * NET_RX_DROP (packet was dropped)
3912 *
3913 */
3914
3915int netif_rx(struct sk_buff *skb)
3916{
3917 trace_netif_rx_entry(skb);
3918
3919 return netif_rx_internal(skb);
3920}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003921EXPORT_SYMBOL(netif_rx);
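
/* Illustrative sketch (hypothetical foo driver, not part of this file):
 * a non-NAPI driver typically builds the skb in its receive interrupt,
 * sets skb->protocol and hands the buffer off with netif_rx().
 *
 *	static void foo_rx(struct foo_dev *fd, const void *data, int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = netdev_alloc_skb_ip_align(fd->netdev, len);
 *		if (!skb)
 *			return;
 *		memcpy(skb_put(skb, len), data, len);
 *		skb->protocol = eth_type_trans(skb, fd->netdev);
 *		netif_rx(skb);
 *	}
 */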
Linus Torvalds1da177e2005-04-16 15:20:36 -07003922
3923int netif_rx_ni(struct sk_buff *skb)
3924{
3925 int err;
3926
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003927 trace_netif_rx_ni_entry(skb);
3928
Linus Torvalds1da177e2005-04-16 15:20:36 -07003929 preempt_disable();
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003930 err = netif_rx_internal(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931 if (local_softirq_pending())
3932 do_softirq();
3933 preempt_enable();
3934
3935 return err;
3936}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937EXPORT_SYMBOL(netif_rx_ni);
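
/* Illustrative sketch: netif_rx_ni() is the variant for callers already
 * in process context, e.g. a virtual device looping a packet back into
 * the stack (foo_loop_rx() is a made-up name):
 *
 *	static void foo_loop_rx(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx_ni(skb);
 *	}
 */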
3938
Emese Revfy0766f782016-06-20 20:42:34 +02003939static __latent_entropy void net_tx_action(struct softirq_action *h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940{
Christoph Lameter903ceff2014-08-17 12:30:35 -05003941 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942
3943 if (sd->completion_queue) {
3944 struct sk_buff *clist;
3945
3946 local_irq_disable();
3947 clist = sd->completion_queue;
3948 sd->completion_queue = NULL;
3949 local_irq_enable();
3950
3951 while (clist) {
3952 struct sk_buff *skb = clist;
tchardingf4563a72017-02-09 17:56:07 +11003953
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954 clist = clist->next;
3955
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003956 WARN_ON(atomic_read(&skb->users));
Eric Dumazete6247022013-12-05 04:45:08 -08003957 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3958 trace_consume_skb(skb);
3959 else
3960 trace_kfree_skb(skb, net_tx_action);
Jesper Dangaard Brouer15fad712016-02-08 13:15:04 +01003961
3962 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
3963 __kfree_skb(skb);
3964 else
3965 __kfree_skb_defer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966 }
Jesper Dangaard Brouer15fad712016-02-08 13:15:04 +01003967
3968 __kfree_skb_flush();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003969 }
3970
3971 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003972 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973
3974 local_irq_disable();
3975 head = sd->output_queue;
3976 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003977 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 local_irq_enable();
3979
3980 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003981 struct Qdisc *q = head;
3982 spinlock_t *root_lock;
3983
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 head = head->next_sched;
3985
David S. Miller5fb66222008-08-02 20:02:43 -07003986 root_lock = qdisc_lock(q);
Eric Dumazet3bcb8462016-06-04 20:02:28 -07003987 spin_lock(root_lock);
3988 /* We need to make sure head->next_sched is read
3989 * before clearing __QDISC_STATE_SCHED
3990 */
3991 smp_mb__before_atomic();
3992 clear_bit(__QDISC_STATE_SCHED, &q->state);
3993 qdisc_run(q);
3994 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995 }
3996 }
3997}
3998
Javier Martinez Canillas181402a2016-09-09 08:43:15 -04003999#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
Michał Mirosławda678292009-06-05 05:35:28 +00004000/* This hook is defined here for ATM LANE */
4001int (*br_fdb_test_addr_hook)(struct net_device *dev,
4002 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07004003EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00004004#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005
Daniel Borkmann1f211a12016-01-07 22:29:47 +01004006static inline struct sk_buff *
4007sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4008 struct net_device *orig_dev)
Herbert Xuf697c3e2007-10-14 00:38:47 -07004009{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004010#ifdef CONFIG_NET_CLS_ACT
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004011 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
4012 struct tcf_result cl_res;
Eric Dumazet24824a02010-10-02 06:11:55 +00004013
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02004014 /* If there's at least one ingress qdisc present somewhere (so
4015 * we get here via enabled static key), remaining devices
4016 * that are not configured with an ingress qdisc will bail
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004017 * out here.
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02004018 */
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004019 if (!cl)
Daniel Borkmann45771392015-04-10 23:07:54 +02004020 return skb;
Herbert Xuf697c3e2007-10-14 00:38:47 -07004021 if (*pt_prev) {
4022 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4023 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07004024 }
4025
Florian Westphal33654952015-05-14 00:36:28 +02004026 qdisc_skb_cb(skb)->pkt_len = skb->len;
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05004027 skb->tc_at_ingress = 1;
Eric Dumazet24ea5912015-07-06 05:18:03 -07004028 qdisc_bstats_cpu_update(cl->q, skb);
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02004029
Jiri Pirko87d83092017-05-17 11:07:54 +02004030 switch (tcf_classify(skb, cl, &cl_res, false)) {
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004031 case TC_ACT_OK:
4032 case TC_ACT_RECLASSIFY:
4033 skb->tc_index = TC_H_MIN(cl_res.classid);
4034 break;
4035 case TC_ACT_SHOT:
Eric Dumazet24ea5912015-07-06 05:18:03 -07004036 qdisc_qstats_cpu_drop(cl->q);
Eric Dumazet8a3a4c62016-05-06 15:55:50 -07004037 kfree_skb(skb);
4038 return NULL;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004039 case TC_ACT_STOLEN:
4040 case TC_ACT_QUEUED:
Eric Dumazet8a3a4c62016-05-06 15:55:50 -07004041 consume_skb(skb);
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004042 return NULL;
Alexei Starovoitov27b29f62015-09-15 23:05:43 -07004043 case TC_ACT_REDIRECT:
4044 /* skb_mac_header check was done by cls/act_bpf, so
4045 * we can safely push the L2 header back before
4046 * redirecting to another netdev
4047 */
4048 __skb_push(skb, skb->mac_len);
4049 skb_do_redirect(skb);
4050 return NULL;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004051 default:
4052 break;
Herbert Xuf697c3e2007-10-14 00:38:47 -07004053 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004054#endif /* CONFIG_NET_CLS_ACT */
Herbert Xuf697c3e2007-10-14 00:38:47 -07004055 return skb;
4056}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004058/**
Mahesh Bandewar24b27fc2016-09-01 22:18:34 -07004059 * netdev_is_rx_handler_busy - check if receive handler is registered
4060 * @dev: device to check
4061 *
4062 * Check if a receive handler is already registered for a given device.
4063 * Return true if there one.
4064 *
4065 * The caller must hold the rtnl_mutex.
4066 */
4067bool netdev_is_rx_handler_busy(struct net_device *dev)
4068{
4069 ASSERT_RTNL();
4070 return dev && rtnl_dereference(dev->rx_handler);
4071}
4072EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4073
4074/**
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004075 * netdev_rx_handler_register - register receive handler
4076 * @dev: device to register a handler for
4077 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00004078 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004079 *
Masanari Iidae2278672014-02-18 22:54:36 +09004080 * Register a receive handler for a device. This handler will then be
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004081 * called from __netif_receive_skb. A negative errno code is returned
4082 * on a failure.
4083 *
4084 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004085 *
4086 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004087 */
4088int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00004089 rx_handler_func_t *rx_handler,
4090 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004091{
Mahesh Bandewar1b7cd002017-01-18 15:02:49 -08004092 if (netdev_is_rx_handler_busy(dev))
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004093 return -EBUSY;
4094
Eric Dumazet00cfec32013-03-29 03:01:22 +00004095 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00004096 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004097 rcu_assign_pointer(dev->rx_handler, rx_handler);
4098
4099 return 0;
4100}
4101EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
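
/* Illustrative sketch (hypothetical, not part of this file): an upper
 * device in the bridge/team mould claims a lower device's receive path
 * roughly like this; foo_handle_frame(), foo_port and upper_dev are
 * made-up names, the RX_HANDLER_* contract is the real one.
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct foo_port *port;
 *
 *		port = rcu_dereference(skb->dev->rx_handler_data);
 *		skb->dev = port->upper_dev;
 *		*pskb = skb;
 *		return RX_HANDLER_ANOTHER;
 *	}
 *
 * and, with rtnl_lock() held during port enslavement:
 *
 *	err = netdev_rx_handler_register(port_dev, foo_handle_frame, port);
 */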
4102
4103/**
4104 * netdev_rx_handler_unregister - unregister receive handler
4105 * @dev: device to unregister a handler from
4106 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00004107 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004108 *
4109 * The caller must hold the rtnl_mutex.
4110 */
4111void netdev_rx_handler_unregister(struct net_device *dev)
4112{
4113
4114 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00004115 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00004116 /* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
4117 * section is guaranteed to see a non-NULL rx_handler_data
4118 * as well.
4119 */
4120 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00004121 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004122}
4123EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4124
Mel Gormanb4b9e352012-07-31 16:44:26 -07004125/*
4126 * Limit the use of PFMEMALLOC reserves to those protocols that implement
4127 * the special handling of PFMEMALLOC skbs.
4128 */
4129static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4130{
4131 switch (skb->protocol) {
Joe Perches2b8837a2014-03-12 10:04:17 -07004132 case htons(ETH_P_ARP):
4133 case htons(ETH_P_IP):
4134 case htons(ETH_P_IPV6):
4135 case htons(ETH_P_8021Q):
4136 case htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07004137 return true;
4138 default:
4139 return false;
4140 }
4141}
4142
Pablo Neirae687ad62015-05-13 18:19:38 +02004143static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4144 int *ret, struct net_device *orig_dev)
4145{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004146#ifdef CONFIG_NETFILTER_INGRESS
Pablo Neirae687ad62015-05-13 18:19:38 +02004147 if (nf_hook_ingress_active(skb)) {
Aaron Conole2c1e2702016-09-21 11:35:03 -04004148 int ingress_retval;
4149
Pablo Neirae687ad62015-05-13 18:19:38 +02004150 if (*pt_prev) {
4151 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4152 *pt_prev = NULL;
4153 }
4154
Aaron Conole2c1e2702016-09-21 11:35:03 -04004155 rcu_read_lock();
4156 ingress_retval = nf_hook_ingress(skb);
4157 rcu_read_unlock();
4158 return ingress_retval;
Pablo Neirae687ad62015-05-13 18:19:38 +02004159 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004160#endif /* CONFIG_NETFILTER_INGRESS */
Pablo Neirae687ad62015-05-13 18:19:38 +02004161 return 0;
4162}
Pablo Neirae687ad62015-05-13 18:19:38 +02004163
David S. Miller9754e292013-02-14 15:57:38 -05004164static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004165{
4166 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004167 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07004168 struct net_device *orig_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004169 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170 int ret = NET_RX_DROP;
Al Viro252e33462006-11-14 20:48:11 -08004171 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004172
Eric Dumazet588f0332011-11-15 04:12:55 +00004173 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07004174
Koki Sanagicf66ba52010-08-23 18:45:02 +09004175 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08004176
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07004177 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00004178
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07004179 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00004180 if (!skb_transport_header_was_set(skb))
4181 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00004182 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183
4184 pt_prev = NULL;
4185
David S. Miller63d8ea72011-02-28 10:48:59 -08004186another_round:
David S. Millerb6858172012-07-23 16:27:54 -07004187 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08004188
4189 __this_cpu_inc(softnet_data.processed);
4190
Patrick McHardy8ad227f2013-04-19 02:04:31 +00004191 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4192 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04004193 skb = skb_vlan_untag(skb);
Jiri Pirkobcc6d472011-04-07 19:48:33 +00004194 if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03004195 goto out;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00004196 }
4197
Willem de Bruijne7246e12017-01-07 17:06:35 -05004198 if (skb_skip_tc_classify(skb))
4199 goto skip_classify;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200
David S. Miller9754e292013-02-14 15:57:38 -05004201 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07004202 goto skip_taps;
4203
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Salam Noureddine7866a622015-01-27 11:35:48 -08004205 if (pt_prev)
4206 ret = deliver_skb(skb, pt_prev, orig_dev);
4207 pt_prev = ptype;
4208 }
4209
4210 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4211 if (pt_prev)
4212 ret = deliver_skb(skb, pt_prev, orig_dev);
4213 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214 }
4215
Mel Gormanb4b9e352012-07-31 16:44:26 -07004216skip_taps:
Pablo Neira1cf519002015-05-13 18:19:37 +02004217#ifdef CONFIG_NET_INGRESS
Daniel Borkmann45771392015-04-10 23:07:54 +02004218 if (static_key_false(&ingress_needed)) {
Daniel Borkmann1f211a12016-01-07 22:29:47 +01004219 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
Daniel Borkmann45771392015-04-10 23:07:54 +02004220 if (!skb)
Julian Anastasov2c17d272015-07-09 09:59:10 +03004221 goto out;
Pablo Neirae687ad62015-05-13 18:19:38 +02004222
4223 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
Julian Anastasov2c17d272015-07-09 09:59:10 +03004224 goto out;
Daniel Borkmann45771392015-04-10 23:07:54 +02004225 }
Pablo Neira1cf519002015-05-13 18:19:37 +02004226#endif
Willem de Bruijna5135bc2017-01-07 17:06:36 -05004227 skb_reset_tc(skb);
Willem de Bruijne7246e12017-01-07 17:06:35 -05004228skip_classify:
David S. Miller9754e292013-02-14 15:57:38 -05004229 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07004230 goto drop;
4231
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004232 if (skb_vlan_tag_present(skb)) {
John Fastabend24257172011-10-10 09:16:41 +00004233 if (pt_prev) {
4234 ret = deliver_skb(skb, pt_prev, orig_dev);
4235 pt_prev = NULL;
4236 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004237 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00004238 goto another_round;
4239 else if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03004240 goto out;
John Fastabend24257172011-10-10 09:16:41 +00004241 }
4242
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004243 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004244 if (rx_handler) {
4245 if (pt_prev) {
4246 ret = deliver_skb(skb, pt_prev, orig_dev);
4247 pt_prev = NULL;
4248 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004249 switch (rx_handler(&skb)) {
4250 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00004251 ret = NET_RX_SUCCESS;
Julian Anastasov2c17d272015-07-09 09:59:10 +03004252 goto out;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004253 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08004254 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004255 case RX_HANDLER_EXACT:
4256 deliver_exact = true;
4257 case RX_HANDLER_PASS:
4258 break;
4259 default:
4260 BUG();
4261 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004262 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004264 if (unlikely(skb_vlan_tag_present(skb))) {
4265 if (skb_vlan_tag_get_id(skb))
Eric Dumazetd4b812d2013-07-18 07:19:26 -07004266 skb->pkt_type = PACKET_OTHERHOST;
4267 /* Note: we might in the future use prio bits
4268 * and set skb->priority as in vlan_do_receive().
4269 * For the time being, just ignore the Priority Code Point.
4270 */
4271 skb->vlan_tci = 0;
4272 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004273
Linus Torvalds1da177e2005-04-16 15:20:36 -07004274 type = skb->protocol;
Salam Noureddine7866a622015-01-27 11:35:48 -08004275
4276 /* deliver only exact match when indicated */
4277 if (likely(!deliver_exact)) {
4278 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4279 &ptype_base[ntohs(type) &
4280 PTYPE_HASH_MASK]);
4281 }
4282
4283 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4284 &orig_dev->ptype_specific);
4285
4286 if (unlikely(skb->dev != orig_dev)) {
4287 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4288 &skb->dev->ptype_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004289 }
4290
4291 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00004292 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00004293 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00004294 else
4295 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07004297drop:
Jarod Wilson6e7333d2016-02-01 18:51:05 -05004298 if (!deliver_exact)
4299 atomic_long_inc(&skb->dev->rx_dropped);
4300 else
4301 atomic_long_inc(&skb->dev->rx_nohandler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302 kfree_skb(skb);
4303 /* Jamal, now you will not be able to escape explaining
4304 * to me how you were going to use this. :-)
4305 */
4306 ret = NET_RX_DROP;
4307 }
4308
Julian Anastasov2c17d272015-07-09 09:59:10 +03004309out:
David S. Miller9754e292013-02-14 15:57:38 -05004310 return ret;
4311}
4312
4313static int __netif_receive_skb(struct sk_buff *skb)
4314{
4315 int ret;
4316
4317 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
Vlastimil Babkaf1083042017-05-08 15:59:53 -07004318 unsigned int noreclaim_flag;
David S. Miller9754e292013-02-14 15:57:38 -05004319
4320 /*
4321 * PFMEMALLOC skbs are special, they should
4322 * - be delivered to SOCK_MEMALLOC sockets only
4323 * - stay away from userspace
4324 * - have bounded memory usage
4325 *
4326 * Use PF_MEMALLOC as this saves us from propagating the allocation
4327 * context down to all allocation sites.
4328 */
Vlastimil Babkaf1083042017-05-08 15:59:53 -07004329 noreclaim_flag = memalloc_noreclaim_save();
David S. Miller9754e292013-02-14 15:57:38 -05004330 ret = __netif_receive_skb_core(skb, true);
Vlastimil Babkaf1083042017-05-08 15:59:53 -07004331 memalloc_noreclaim_restore(noreclaim_flag);
David S. Miller9754e292013-02-14 15:57:38 -05004332 } else
4333 ret = __netif_receive_skb_core(skb, false);
4334
Linus Torvalds1da177e2005-04-16 15:20:36 -07004335 return ret;
4336}
Tom Herbert0a9627f2010-03-16 08:03:29 +00004337
David S. Millerb5cdae32017-04-18 15:36:58 -04004338static struct static_key generic_xdp_needed __read_mostly;
4339
4340static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
4341{
4342 struct bpf_prog *new = xdp->prog;
4343 int ret = 0;
4344
4345 switch (xdp->command) {
4346 case XDP_SETUP_PROG: {
4347 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
4348
4349 rcu_assign_pointer(dev->xdp_prog, new);
4350 if (old)
4351 bpf_prog_put(old);
4352
4353 if (old && !new) {
4354 static_key_slow_dec(&generic_xdp_needed);
4355 } else if (new && !old) {
4356 static_key_slow_inc(&generic_xdp_needed);
4357 dev_disable_lro(dev);
4358 }
4359 break;
4360 }
4361
4362 case XDP_QUERY_PROG:
4363 xdp->prog_attached = !!rcu_access_pointer(dev->xdp_prog);
4364 break;
4365
4366 default:
4367 ret = -EINVAL;
4368 break;
4369 }
4370
4371 return ret;
4372}
4373
4374static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4375 struct bpf_prog *xdp_prog)
4376{
4377 struct xdp_buff xdp;
4378 u32 act = XDP_DROP;
4379 void *orig_data;
4380 int hlen, off;
4381 u32 mac_len;
4382
4383 /* Reinjected packets coming from act_mirred or similar should
4384 * not get XDP generic processing.
4385 */
4386 if (skb_cloned(skb))
4387 return XDP_PASS;
4388
4389 if (skb_linearize(skb))
4390 goto do_drop;
4391
4392 /* The XDP program wants to see the packet starting at the MAC
4393 * header.
4394 */
4395 mac_len = skb->data - skb_mac_header(skb);
4396 hlen = skb_headlen(skb) + mac_len;
4397 xdp.data = skb->data - mac_len;
4398 xdp.data_end = xdp.data + hlen;
4399 xdp.data_hard_start = skb->data - skb_headroom(skb);
4400 orig_data = xdp.data;
4401
4402 act = bpf_prog_run_xdp(xdp_prog, &xdp);
4403
4404 off = xdp.data - orig_data;
4405 if (off > 0)
4406 __skb_pull(skb, off);
4407 else if (off < 0)
4408 __skb_push(skb, -off);
4409
4410 switch (act) {
4411 case XDP_TX:
4412 __skb_push(skb, mac_len);
4413 /* fall through */
4414 case XDP_PASS:
4415 break;
4416
4417 default:
4418 bpf_warn_invalid_xdp_action(act);
4419 /* fall through */
4420 case XDP_ABORTED:
4421 trace_xdp_exception(skb->dev, xdp_prog, act);
4422 /* fall through */
4423 case XDP_DROP:
4424 do_drop:
4425 kfree_skb(skb);
4426 break;
4427 }
4428
4429 return act;
4430}
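
/* Illustrative sketch: the program run above is an ordinary XDP program;
 * generic XDP just executes it outside the driver. A minimal example,
 * built separately with clang -target bpf (SEC() as used by the kernel's
 * BPF samples; not part of this file):
 *
 *	SEC("xdp")
 *	int xdp_drop_ipv4(struct xdp_md *ctx)
 *	{
 *		void *data = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *		struct ethhdr *eth = data;
 *
 *		if (data + sizeof(*eth) > data_end)
 *			return XDP_ABORTED;
 *		if (eth->h_proto == __constant_htons(ETH_P_IP))
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */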
4431
4432/* When doing generic XDP we have to bypass the qdisc layer and the
4433 * network taps in order to match in-driver XDP behavior.
4434 */
4435static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4436{
4437 struct net_device *dev = skb->dev;
4438 struct netdev_queue *txq;
4439 bool free_skb = true;
4440 int cpu, rc;
4441
4442 txq = netdev_pick_tx(dev, skb, NULL);
4443 cpu = smp_processor_id();
4444 HARD_TX_LOCK(dev, txq, cpu);
4445 if (!netif_xmit_stopped(txq)) {
4446 rc = netdev_start_xmit(skb, dev, txq, 0);
4447 if (dev_xmit_complete(rc))
4448 free_skb = false;
4449 }
4450 HARD_TX_UNLOCK(dev, txq);
4451 if (free_skb) {
4452 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4453 kfree_skb(skb);
4454 }
4455}
4456
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004457static int netif_receive_skb_internal(struct sk_buff *skb)
Tom Herbert0a9627f2010-03-16 08:03:29 +00004458{
Julian Anastasov2c17d272015-07-09 09:59:10 +03004459 int ret;
4460
Eric Dumazet588f0332011-11-15 04:12:55 +00004461 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07004462
Richard Cochranc1f19b52010-07-17 08:49:36 +00004463 if (skb_defer_rx_timestamp(skb))
4464 return NET_RX_SUCCESS;
4465
Julian Anastasov2c17d272015-07-09 09:59:10 +03004466 rcu_read_lock();
4467
David S. Millerb5cdae32017-04-18 15:36:58 -04004468 if (static_key_false(&generic_xdp_needed)) {
4469 struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog);
4470
4471 if (xdp_prog) {
4472 u32 act = netif_receive_generic_xdp(skb, xdp_prog);
4473
4474 if (act != XDP_PASS) {
4475 rcu_read_unlock();
4476 if (act == XDP_TX)
4477 generic_xdp_tx(skb, xdp_prog);
4478 return NET_RX_DROP;
4479 }
4480 }
4481 }
4482
Eric Dumazetdf334542010-03-24 19:13:54 +00004483#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01004484 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07004485 struct rps_dev_flow voidflow, *rflow = &voidflow;
Julian Anastasov2c17d272015-07-09 09:59:10 +03004486 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07004487
Eric Dumazet3b098e22010-05-15 23:57:10 -07004488 if (cpu >= 0) {
4489 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4490 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00004491 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07004492 }
Tom Herbertfec5e652010-04-16 16:01:27 -07004493 }
Tom Herbert1e94d722010-03-18 17:45:44 -07004494#endif
Julian Anastasov2c17d272015-07-09 09:59:10 +03004495 ret = __netif_receive_skb(skb);
4496 rcu_read_unlock();
4497 return ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004498}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004499
4500/**
4501 * netif_receive_skb - process receive buffer from network
4502 * @skb: buffer to process
4503 *
4504 * netif_receive_skb() is the main receive data processing function.
4505 * It always succeeds. The buffer may be dropped during processing
4506 * for congestion control or by the protocol layers.
4507 *
4508 * This function may only be called from softirq context and interrupts
4509 * should be enabled.
4510 *
4511 * Return values (usually ignored):
4512 * NET_RX_SUCCESS: no congestion
4513 * NET_RX_DROP: packet was dropped
4514 */
Eric W. Biederman04eb4482015-09-15 20:04:15 -05004515int netif_receive_skb(struct sk_buff *skb)
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004516{
4517 trace_netif_receive_skb_entry(skb);
4518
4519 return netif_receive_skb_internal(skb);
4520}
Eric W. Biederman04eb4482015-09-15 20:04:15 -05004521EXPORT_SYMBOL(netif_receive_skb);
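
/* Illustrative sketch: a NAPI driver calls this (or its GRO wrapper,
 * napi_gro_receive()) from its poll routine once the skb is fully set
 * up; ring/netdev below are hypothetical driver state.
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	skb_record_rx_queue(skb, ring->index);
 *	netif_receive_skb(skb);
 */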
Linus Torvalds1da177e2005-04-16 15:20:36 -07004522
Eric Dumazet41852492016-08-26 12:50:39 -07004523DEFINE_PER_CPU(struct work_struct, flush_works);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004524
4525/* Network device is going away, flush any packets still pending */
4526static void flush_backlog(struct work_struct *work)
4527{
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004528 struct sk_buff *skb, *tmp;
4529 struct softnet_data *sd;
4530
4531 local_bh_disable();
4532 sd = this_cpu_ptr(&softnet_data);
4533
4534 local_irq_disable();
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004535 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004536 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Eric Dumazet41852492016-08-26 12:50:39 -07004537 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004538 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004539 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004540 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004541 }
Changli Gao6e7676c2010-04-27 15:07:33 -07004542 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004543 rps_unlock(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004544 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004545
4546 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
Eric Dumazet41852492016-08-26 12:50:39 -07004547 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
Changli Gao6e7676c2010-04-27 15:07:33 -07004548 __skb_unlink(skb, &sd->process_queue);
4549 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004550 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004551 }
4552 }
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004553 local_bh_enable();
4554}
4555
Eric Dumazet41852492016-08-26 12:50:39 -07004556static void flush_all_backlogs(void)
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004557{
4558 unsigned int cpu;
4559
4560 get_online_cpus();
4561
Eric Dumazet41852492016-08-26 12:50:39 -07004562 for_each_online_cpu(cpu)
4563 queue_work_on(cpu, system_highpri_wq,
4564 per_cpu_ptr(&flush_works, cpu));
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004565
4566 for_each_online_cpu(cpu)
Eric Dumazet41852492016-08-26 12:50:39 -07004567 flush_work(per_cpu_ptr(&flush_works, cpu));
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004568
4569 put_online_cpus();
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004570}
4571
Herbert Xud565b0a2008-12-15 23:38:52 -08004572static int napi_gro_complete(struct sk_buff *skb)
4573{
Vlad Yasevich22061d82012-11-15 08:49:11 +00004574 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004575 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004576 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08004577 int err = -ENOENT;
4578
Eric Dumazetc3c7c252012-12-06 13:54:59 +00004579 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4580
Herbert Xufc59f9a2009-04-14 15:11:06 -07004581 if (NAPI_GRO_CB(skb)->count == 1) {
4582 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004583 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07004584 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004585
4586 rcu_read_lock();
4587 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004588 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08004589 continue;
4590
Jerry Chu299603e82013-12-11 20:53:45 -08004591 err = ptype->callbacks.gro_complete(skb, 0);
Herbert Xud565b0a2008-12-15 23:38:52 -08004592 break;
4593 }
4594 rcu_read_unlock();
4595
4596 if (err) {
4597 WARN_ON(&ptype->list == head);
4598 kfree_skb(skb);
4599 return NET_RX_SUCCESS;
4600 }
4601
4602out:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004603 return netif_receive_skb_internal(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004604}
4605
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004606/* napi->gro_list contains packets ordered by age,
4607 * with the youngest packets at the head of it.
4608 * Complete skbs in reverse order to reduce latencies.
4609 */
4610void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08004611{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004612 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004613
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004614 /* scan list and build reverse chain */
4615 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4616 skb->prev = prev;
4617 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08004618 }
4619
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004620 for (skb = prev; skb; skb = prev) {
4621 skb->next = NULL;
4622
4623 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4624 return;
4625
4626 prev = skb->prev;
4627 napi_gro_complete(skb);
4628 napi->gro_count--;
4629 }
4630
Herbert Xud565b0a2008-12-15 23:38:52 -08004631 napi->gro_list = NULL;
4632}
Eric Dumazet86cac582010-08-31 18:25:32 +00004633EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08004634
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004635static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4636{
4637 struct sk_buff *p;
4638 unsigned int maclen = skb->dev->hard_header_len;
Tom Herbert0b4cec82014-01-15 08:58:06 -08004639 u32 hash = skb_get_hash_raw(skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004640
4641 for (p = napi->gro_list; p; p = p->next) {
4642 unsigned long diffs;
4643
Tom Herbert0b4cec82014-01-15 08:58:06 -08004644 NAPI_GRO_CB(p)->flush = 0;
4645
4646 if (hash != skb_get_hash_raw(p)) {
4647 NAPI_GRO_CB(p)->same_flow = 0;
4648 continue;
4649 }
4650
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004651 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4652 diffs |= p->vlan_tci ^ skb->vlan_tci;
Jesse Grossce87fc62016-01-20 17:59:49 -08004653 diffs |= skb_metadata_dst_cmp(p, skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004654 if (maclen == ETH_HLEN)
4655 diffs |= compare_ether_header(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07004656 skb_mac_header(skb));
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004657 else if (!diffs)
4658 diffs = memcmp(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07004659 skb_mac_header(skb),
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004660 maclen);
4661 NAPI_GRO_CB(p)->same_flow = !diffs;
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004662 }
4663}
4664
Jerry Chu299603e82013-12-11 20:53:45 -08004665static void skb_gro_reset_offset(struct sk_buff *skb)
4666{
4667 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4668 const skb_frag_t *frag0 = &pinfo->frags[0];
4669
4670 NAPI_GRO_CB(skb)->data_offset = 0;
4671 NAPI_GRO_CB(skb)->frag0 = NULL;
4672 NAPI_GRO_CB(skb)->frag0_len = 0;
4673
4674 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4675 pinfo->nr_frags &&
4676 !PageHighMem(skb_frag_page(frag0))) {
4677 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
Eric Dumazet7cfd5fd2017-01-10 19:52:43 -08004678 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
4679 skb_frag_size(frag0),
4680 skb->end - skb->tail);
Herbert Xud565b0a2008-12-15 23:38:52 -08004681 }
4682}
4683
Eric Dumazeta50e2332014-03-29 21:28:21 -07004684static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4685{
4686 struct skb_shared_info *pinfo = skb_shinfo(skb);
4687
4688 BUG_ON(skb->end - skb->tail < grow);
4689
4690 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4691
4692 skb->data_len -= grow;
4693 skb->tail += grow;
4694
4695 pinfo->frags[0].page_offset += grow;
4696 skb_frag_size_sub(&pinfo->frags[0], grow);
4697
4698 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4699 skb_frag_unref(skb, 0);
4700 memmove(pinfo->frags, pinfo->frags + 1,
4701 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4702 }
4703}
4704
Rami Rosenbb728822012-11-28 21:55:25 +00004705static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08004706{
4707 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004708 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004709 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004710 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004711 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004712 enum gro_result ret;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004713 int grow;
Herbert Xud565b0a2008-12-15 23:38:52 -08004714
David S. Millerb5cdae32017-04-18 15:36:58 -04004715 if (netif_elide_gro(skb->dev))
Herbert Xud565b0a2008-12-15 23:38:52 -08004716 goto normal;
4717
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004718 gro_list_prepare(napi, skb);
4719
Herbert Xud565b0a2008-12-15 23:38:52 -08004720 rcu_read_lock();
4721 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004722 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08004723 continue;
4724
Herbert Xu86911732009-01-29 14:19:50 +00004725 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00004726 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004727 NAPI_GRO_CB(skb)->same_flow = 0;
Eric Dumazetd61d0722016-11-07 11:12:27 -08004728 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
Herbert Xu5d38a072009-01-04 16:13:40 -08004729 NAPI_GRO_CB(skb)->free = 0;
Jesse Grossfac8e0f2016-03-19 09:32:01 -07004730 NAPI_GRO_CB(skb)->encap_mark = 0;
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02004731 NAPI_GRO_CB(skb)->recursion_counter = 0;
Alexander Duycka0ca1532016-04-05 09:13:39 -07004732 NAPI_GRO_CB(skb)->is_fou = 0;
Alexander Duyck15305452016-04-10 21:44:57 -04004733 NAPI_GRO_CB(skb)->is_atomic = 1;
Tom Herbert15e23962015-02-10 16:30:31 -08004734 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004735
Tom Herbert662880f2014-08-27 21:26:56 -07004736 /* Setup for GRO checksum validation */
4737 switch (skb->ip_summed) {
4738 case CHECKSUM_COMPLETE:
4739 NAPI_GRO_CB(skb)->csum = skb->csum;
4740 NAPI_GRO_CB(skb)->csum_valid = 1;
4741 NAPI_GRO_CB(skb)->csum_cnt = 0;
4742 break;
4743 case CHECKSUM_UNNECESSARY:
4744 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4745 NAPI_GRO_CB(skb)->csum_valid = 0;
4746 break;
4747 default:
4748 NAPI_GRO_CB(skb)->csum_cnt = 0;
4749 NAPI_GRO_CB(skb)->csum_valid = 0;
4750 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004751
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004752 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004753 break;
4754 }
4755 rcu_read_unlock();
4756
4757 if (&ptype->list == head)
4758 goto normal;
4759
Steffen Klassert25393d32017-02-15 09:39:44 +01004760 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
4761 ret = GRO_CONSUMED;
4762 goto ok;
4763 }
4764
Herbert Xu0da2afd52008-12-26 14:57:42 -08004765 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004766 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004767
Herbert Xud565b0a2008-12-15 23:38:52 -08004768 if (pp) {
4769 struct sk_buff *nskb = *pp;
4770
4771 *pp = nskb->next;
4772 nskb->next = NULL;
4773 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00004774 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08004775 }
4776
Herbert Xu0da2afd52008-12-26 14:57:42 -08004777 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08004778 goto ok;
4779
Eric Dumazet600adc12014-01-09 14:12:19 -08004780 if (NAPI_GRO_CB(skb)->flush)
Herbert Xud565b0a2008-12-15 23:38:52 -08004781 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08004782
Eric Dumazet600adc12014-01-09 14:12:19 -08004783 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4784 struct sk_buff *nskb = napi->gro_list;
4785
4786 /* locate the end of the list to select the 'oldest' flow */
4787 while (nskb->next) {
4788 pp = &nskb->next;
4789 nskb = *pp;
4790 }
4791 *pp = NULL;
4792 nskb->next = NULL;
4793 napi_gro_complete(nskb);
4794 } else {
4795 napi->gro_count++;
4796 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004797 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004798 NAPI_GRO_CB(skb)->age = jiffies;
Eric Dumazet29e98242014-05-16 11:34:37 -07004799 NAPI_GRO_CB(skb)->last = skb;
Herbert Xu86911732009-01-29 14:19:50 +00004800 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004801 skb->next = napi->gro_list;
4802 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004803 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08004804
Herbert Xuad0f9902009-02-01 01:24:55 -08004805pull:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004806 grow = skb_gro_offset(skb) - skb_headlen(skb);
4807 if (grow > 0)
4808 gro_pull_from_frag0(skb, grow);
Herbert Xud565b0a2008-12-15 23:38:52 -08004809ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004810 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08004811
4812normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08004813 ret = GRO_NORMAL;
4814 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08004815}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004816
Jerry Chubf5a7552014-01-07 10:23:19 -08004817struct packet_offload *gro_find_receive_by_type(__be16 type)
4818{
4819 struct list_head *offload_head = &offload_base;
4820 struct packet_offload *ptype;
4821
4822 list_for_each_entry_rcu(ptype, offload_head, list) {
4823 if (ptype->type != type || !ptype->callbacks.gro_receive)
4824 continue;
4825 return ptype;
4826 }
4827 return NULL;
4828}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004829EXPORT_SYMBOL(gro_find_receive_by_type);
Jerry Chubf5a7552014-01-07 10:23:19 -08004830
4831struct packet_offload *gro_find_complete_by_type(__be16 type)
4832{
4833 struct list_head *offload_head = &offload_base;
4834 struct packet_offload *ptype;
4835
4836 list_for_each_entry_rcu(ptype, offload_head, list) {
4837 if (ptype->type != type || !ptype->callbacks.gro_complete)
4838 continue;
4839 return ptype;
4840 }
4841 return NULL;
4842}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004843EXPORT_SYMBOL(gro_find_complete_by_type);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004844
Rami Rosenbb728822012-11-28 21:55:25 +00004845static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08004846{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004847 switch (ret) {
4848 case GRO_NORMAL:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004849 if (netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004850 ret = GRO_DROP;
4851 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004852
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004853 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08004854 kfree_skb(skb);
4855 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004856
Eric Dumazetdaa86542012-04-19 07:07:40 +00004857 case GRO_MERGED_FREE:
Jesse Grossce87fc62016-01-20 17:59:49 -08004858 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
4859 skb_dst_drop(skb);
Steffen Klassertf991bb92017-01-30 06:45:38 +01004860 secpath_reset(skb);
Eric Dumazetd7e88832012-04-30 08:10:34 +00004861 kmem_cache_free(skbuff_head_cache, skb);
Jesse Grossce87fc62016-01-20 17:59:49 -08004862 } else {
Eric Dumazetd7e88832012-04-30 08:10:34 +00004863 __kfree_skb(skb);
Jesse Grossce87fc62016-01-20 17:59:49 -08004864 }
Eric Dumazetdaa86542012-04-19 07:07:40 +00004865 break;
4866
Ben Hutchings5b252f02009-10-29 07:17:09 +00004867 case GRO_HELD:
4868 case GRO_MERGED:
Steffen Klassert25393d32017-02-15 09:39:44 +01004869 case GRO_CONSUMED:
Ben Hutchings5b252f02009-10-29 07:17:09 +00004870 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004871 }
4872
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004873 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004874}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004875
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004876gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004877{
Eric Dumazet93f93a42015-11-18 06:30:59 -08004878 skb_mark_napi_id(skb, napi);
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004879 trace_napi_gro_receive_entry(skb);
Herbert Xu86911732009-01-29 14:19:50 +00004880
Eric Dumazeta50e2332014-03-29 21:28:21 -07004881 skb_gro_reset_offset(skb);
4882
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004883 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004884}
4885EXPORT_SYMBOL(napi_gro_receive);
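
/* Illustrative sketch (hypothetical foo driver): the usual NAPI poll
 * pattern feeds completed buffers to GRO rather than calling
 * netif_receive_skb() directly.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_rx_ring *ring =
 *			container_of(napi, struct foo_rx_ring, napi);
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = foo_fetch_rx_skb(ring);
 *
 *			if (!skb)
 *				break;
 *			skb->protocol = eth_type_trans(skb, ring->netdev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */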
4886
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00004887static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004888{
Eric Dumazet93a35f52014-10-23 06:30:30 -07004889 if (unlikely(skb->pfmemalloc)) {
4890 consume_skb(skb);
4891 return;
4892 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004893 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00004894 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4895 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00004896 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08004897 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08004898 skb->skb_iif = 0;
Jerry Chuc3caf112014-07-14 15:54:46 -07004899 skb->encapsulation = 0;
4900 skb_shinfo(skb)->gso_type = 0;
Eric Dumazete33d0ba2014-04-03 09:28:10 -07004901 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
Steffen Klassertf991bb92017-01-30 06:45:38 +01004902 secpath_reset(skb);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004903
4904 napi->skb = skb;
4905}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004906
Herbert Xu76620aa2009-04-16 02:02:07 -07004907struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08004908{
Herbert Xu5d38a072009-01-04 16:13:40 -08004909 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004910
4911 if (!skb) {
Alexander Duyckfd11a832014-12-09 19:40:49 -08004912 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
Eric Dumazete2f9dc32015-11-19 12:11:23 -08004913 if (skb) {
4914 napi->skb = skb;
4915 skb_mark_napi_id(skb, napi);
4916 }
Herbert Xu5d38a072009-01-04 16:13:40 -08004917 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004918 return skb;
4919}
Herbert Xu76620aa2009-04-16 02:02:07 -07004920EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004921
Eric Dumazeta50e2332014-03-29 21:28:21 -07004922static gro_result_t napi_frags_finish(struct napi_struct *napi,
4923 struct sk_buff *skb,
4924 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004925{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004926 switch (ret) {
4927 case GRO_NORMAL:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004928 case GRO_HELD:
4929 __skb_push(skb, ETH_HLEN);
4930 skb->protocol = eth_type_trans(skb, skb->dev);
4931 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004932 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00004933 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004934
4935 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004936 case GRO_MERGED_FREE:
4937 napi_reuse_skb(napi, skb);
4938 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004939
4940 case GRO_MERGED:
Steffen Klassert25393d32017-02-15 09:39:44 +01004941 case GRO_CONSUMED:
Ben Hutchings5b252f02009-10-29 07:17:09 +00004942 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004943 }
4944
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004945 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004946}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004947
Eric Dumazeta50e2332014-03-29 21:28:21 -07004948/* Upper GRO stack assumes network header starts at gro_offset=0
4949 * Drivers could call both napi_gro_frags() and napi_gro_receive()
4950 * We copy ethernet header into skb->data to have a common layout.
4951 */
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004952static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004953{
Herbert Xu76620aa2009-04-16 02:02:07 -07004954 struct sk_buff *skb = napi->skb;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004955 const struct ethhdr *eth;
4956 unsigned int hlen = sizeof(*eth);
Herbert Xu76620aa2009-04-16 02:02:07 -07004957
4958 napi->skb = NULL;
4959
Eric Dumazeta50e2332014-03-29 21:28:21 -07004960 skb_reset_mac_header(skb);
4961 skb_gro_reset_offset(skb);
4962
4963 eth = skb_gro_header_fast(skb, 0);
4964 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4965 eth = skb_gro_header_slow(skb, hlen, 0);
4966 if (unlikely(!eth)) {
Aaron Conole4da46ce2016-04-02 15:26:43 -04004967 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
4968 __func__, napi->dev->name);
Eric Dumazeta50e2332014-03-29 21:28:21 -07004969 napi_reuse_skb(napi, skb);
4970 return NULL;
4971 }
4972 } else {
4973 gro_pull_from_frag0(skb, hlen);
4974 NAPI_GRO_CB(skb)->frag0 += hlen;
4975 NAPI_GRO_CB(skb)->frag0_len -= hlen;
Herbert Xu76620aa2009-04-16 02:02:07 -07004976 }
Eric Dumazeta50e2332014-03-29 21:28:21 -07004977 __skb_pull(skb, hlen);
4978
4979 /*
4980 * This works because the only protocols we care about don't require
4981 * special handling.
4982 * We'll fix it up properly in napi_frags_finish()
4983 */
4984 skb->protocol = eth->h_proto;
Herbert Xu76620aa2009-04-16 02:02:07 -07004985
Herbert Xu76620aa2009-04-16 02:02:07 -07004986 return skb;
4987}
Herbert Xu76620aa2009-04-16 02:02:07 -07004988
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004989gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004990{
4991 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004992
4993 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004994 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004995
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004996 trace_napi_gro_frags_entry(skb);
4997
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004998 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004999}
5000EXPORT_SYMBOL(napi_gro_frags);
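
/* Illustrative sketch: drivers that receive straight into pages use the
 * napi_get_frags()/napi_gro_frags() pair; page, offset, len and truesize
 * below come from hypothetical driver RX descriptors.
 *
 *	skb = napi_get_frags(napi);
 *	if (unlikely(!skb))
 *		return;
 *	skb_add_rx_frag(skb, 0, page, offset, len, truesize);
 *	napi_gro_frags(napi);
 */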
5001
Tom Herbert573e8fc2014-08-22 13:33:47 -07005002/* Compute the checksum from gro_offset and return the folded value
5003 * after adding in any pseudo checksum.
5004 */
5005__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
5006{
5007 __wsum wsum;
5008 __sum16 sum;
5009
5010 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
5011
5012 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
5013 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
5014 if (likely(!sum)) {
5015 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
5016 !skb->csum_complete_sw)
5017 netdev_rx_csum_fault(skb->dev);
5018 }
5019
5020 NAPI_GRO_CB(skb)->csum = wsum;
5021 NAPI_GRO_CB(skb)->csum_valid = 1;
5022
5023 return sum;
5024}
5025EXPORT_SYMBOL(__skb_gro_checksum_complete);
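
/* Illustrative sketch: protocol gro_receive handlers normally reach this
 * through helpers such as skb_gro_checksum_validate(); the TCP/IPv4
 * offload path does roughly:
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 */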
5026
Eric Dumazete326bed2010-04-22 00:22:45 -07005027/*
Zhi Yong Wu855abcf2014-01-01 04:34:50 +08005028 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
Eric Dumazete326bed2010-04-22 00:22:45 -07005029 * Note: called with local irq disabled, but exits with local irq enabled.
5030 */
5031static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5032{
5033#ifdef CONFIG_RPS
5034 struct softnet_data *remsd = sd->rps_ipi_list;
5035
5036 if (remsd) {
5037 sd->rps_ipi_list = NULL;
5038
5039 local_irq_enable();
5040
5041 /* Send pending IPIs to kick RPS processing on remote CPUs. */
5042 while (remsd) {
5043 struct softnet_data *next = remsd->rps_ipi_next;
5044
5045 if (cpu_online(remsd->cpu))
Frederic Weisbeckerc46fff22014-02-24 16:40:02 +01005046 smp_call_function_single_async(remsd->cpu,
Frederic Weisbeckerfce8ad12014-02-24 16:40:01 +01005047 &remsd->csd);
Eric Dumazete326bed2010-04-22 00:22:45 -07005048 remsd = next;
5049 }
5050 } else
5051#endif
5052 local_irq_enable();
5053}
5054
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005055static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5056{
5057#ifdef CONFIG_RPS
5058 return sd->rps_ipi_list != NULL;
5059#else
5060 return false;
5061#endif
5062}
5063
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005064static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005065{
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07005066 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005067 bool again = true;
5068 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005069
Eric Dumazete326bed2010-04-22 00:22:45 -07005070 /* Check if we have pending IPIs; it's better to send them now
5071 * than to wait for net_rx_action() to end.
5072 */
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005073 if (sd_has_rps_ipi_waiting(sd)) {
Eric Dumazete326bed2010-04-22 00:22:45 -07005074 local_irq_disable();
5075 net_rps_action_and_irq_enable(sd);
5076 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005077
Matthias Tafelmeier3d48b532016-12-29 21:37:21 +01005078 napi->weight = dev_rx_weight;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005079 while (again) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081
Changli Gao6e7676c2010-04-27 15:07:33 -07005082 while ((skb = __skb_dequeue(&sd->process_queue))) {
Julian Anastasov2c17d272015-07-09 09:59:10 +03005083 rcu_read_lock();
Changli Gao6e7676c2010-04-27 15:07:33 -07005084 __netif_receive_skb(skb);
Julian Anastasov2c17d272015-07-09 09:59:10 +03005085 rcu_read_unlock();
Tom Herbert76cc8b12010-05-20 18:37:59 +00005086 input_queue_head_incr(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005087 if (++work >= quota)
Tom Herbert76cc8b12010-05-20 18:37:59 +00005088 return work;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005089
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005090 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005092 local_irq_disable();
Changli Gao6e7676c2010-04-27 15:07:33 -07005093 rps_lock(sd);
Tom Herbert11ef7a82014-06-30 09:50:40 -07005094 if (skb_queue_empty(&sd->input_pkt_queue)) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07005095 /*
5096 * Inline a custom version of __napi_complete().
5097 * only current cpu owns and manipulates this napi,
Tom Herbert11ef7a82014-06-30 09:50:40 -07005098 * and NAPI_STATE_SCHED is the only possible flag set
5099 * on backlog.
5100 * We can use a plain write instead of clear_bit(),
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07005101 * and we dont need an smp_mb() memory barrier.
5102 */
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07005103 napi->state = 0;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005104 again = false;
5105 } else {
5106 skb_queue_splice_tail_init(&sd->input_pkt_queue,
5107 &sd->process_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07005108 }
5109 rps_unlock(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005110 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07005111 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005113 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114}
5115
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running. This is used as a condition variable to
 * ensure only one NAPI poll instance runs. We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set.
		 * This was suggested by Alexander Duyck, as the compiler
		 * emits better code than:
		 * if (val & NAPIF_STATE_SCHED)
		 *	new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);

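/* Illustrative note (not from the original source): the divide/multiply
 * line above is a branchless way of copying one flag into another. Since
 * NAPIF_STATE_SCHED is a single bit, (val & NAPIF_STATE_SCHED) is either
 * 0 or NAPIF_STATE_SCHED, so dividing by NAPIF_STATE_SCHED yields 0 or 1,
 * and multiplying by NAPIF_STATE_MISSED yields 0 or NAPIF_STATE_MISSED.
 * Both flag operands are compile-time constants, so the compiler reduces
 * the whole expression to shifts and masks with no conditional branch.
 */
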
/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);

bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case it's running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))
		return false;

	if (n->gro_list) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
		else
			napi_gro_flush(n, false);
	}
	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);
	}

	do {
		val = READ_ONCE(n->state);

		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (cmpxchg(&n->state, val, new) != val);

	if (unlikely(val & NAPIF_STATE_MISSED)) {
		__napi_schedule(n);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(napi_complete_done);

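/* Illustrative sketch (not from the original source): a typical driver
 * NAPI poll routine pairs with napi_complete_done(), re-enabling device
 * interrupts only when the budget was not exhausted and no rerun is
 * pending (the function returned true). foo_clean_rx() and
 * foo_enable_irq() are hypothetical driver helpers:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = foo_clean_rx(napi, budget);
 *
 *		if (done < budget && napi_complete_done(napi, done))
 *			foo_enable_irq(napi->dev);
 *		return done;
 *	}
 */
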
/* must be called under rcu_read_lock(), as we don't take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}

#if defined(CONFIG_NET_RX_BUSY_POLL)

#define BUSY_POLL_BUDGET 8

static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
{
	int rc;

	/* Busy polling means there is a high chance device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit() calls.
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	local_bh_disable();

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, BUSY_POLL_BUDGET);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == BUSY_POLL_BUDGET)
		__napi_schedule(napi);
	local_bh_enable();
	if (local_softirq_pending())
		do_softirq();
}

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg)
{
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

restart:
	napi_poll = NULL;

	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (!napi)
		goto out;

	preempt_disable();
	for (;;) {
		int work = 0;

		local_bh_disable();
		if (!napi_poll) {
			unsigned long val = READ_ONCE(napi->state);

			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
				   NAPIF_STATE_IN_BUSY_POLL))
				goto count;
			if (cmpxchg(&napi->state, val,
				    val | NAPIF_STATE_IN_BUSY_POLL |
					  NAPIF_STATE_SCHED) != val)
				goto count;
			have_poll_lock = netpoll_poll_lock(napi);
			napi_poll = napi->poll;
		}
		work = napi_poll(napi, BUSY_POLL_BUDGET);
		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
count:
		if (work > 0)
			__NET_ADD_STATS(dev_net(napi->dev),
					LINUX_MIB_BUSYPOLLRXPACKETS, work);
		local_bh_enable();

		if (!loop_end || loop_end(loop_end_arg, start_time))
			break;

		if (unlikely(need_resched())) {
			if (napi_poll)
				busy_poll_stop(napi, have_poll_lock);
			preempt_enable();
			rcu_read_unlock();
			cond_resched();
			if (loop_end(loop_end_arg, start_time))
				return;
			goto restart;
		}
		cpu_relax();
	}
	if (napi_poll)
		busy_poll_stop(napi, have_poll_lock);
	preempt_enable();
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);

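/* Illustrative sketch (not from the original source): callers bound the
 * busy poll with a loop_end callback. A minimal deadline-based callback,
 * where my_loop_end and the jiffies deadline passed via loop_end_arg are
 * both hypothetical:
 *
 *	static bool my_loop_end(void *arg, unsigned long start_time)
 *	{
 *		unsigned long deadline = (unsigned long)arg;
 *
 *		return time_after(jiffies, deadline);
 *	}
 *
 * Returning true ends the loop. A NULL loop_end makes napi_busy_loop()
 * run napi->poll() exactly once and return, as the !loop_end test above
 * shows.
 */
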
#endif /* CONFIG_NET_RX_BUSY_POLL */

static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}

/* Warning: the caller is responsible for making sure an RCU grace period
 * is respected before freeing the memory containing @napi.
 */
bool napi_hash_del(struct napi_struct *napi)
{
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	}
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
}
EXPORT_SYMBOL_GPL(napi_hash_del);

static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note: we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (napi->gro_list && !napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
		__napi_schedule_irqoff(napi);

	return HRTIMER_NORESTART;
}

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);

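/* Illustrative sketch (not from the original source): the usual driver
 * lifecycle around this API, with priv and foo_poll hypothetical:
 *
 *	netif_napi_add(netdev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 *	napi_enable(&priv->napi);	(in ndo_open)
 *	napi_disable(&priv->napi);	(in ndo_stop)
 *	netif_napi_del(&priv->napi);	(at teardown)
 *
 * netif_napi_add() leaves NAPI_STATE_SCHED set, so the instance cannot be
 * scheduled until napi_enable() clears that bit.
 */
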
void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	might_sleep();
	if (napi_hash_del(napi))
		synchronize_net();
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi(). Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call. Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight. In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}

static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(netdev_budget_usecs);
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				goto out;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which will allow
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
out:
	__kfree_skb_flush();
}

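/* Illustrative note (not from the original source): both limits consulted
 * above are runtime tunables, net.core.netdev_budget (packet budget) and
 * net.core.netdev_budget_usecs (time budget). sd->time_squeeze counts how
 * often the softirq had to stop with work still queued; it is exported via
 * /proc/net/softnet_stat, so a steadily rising value there suggests
 * raising the budgets or spreading receive load across cpus.
 */
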
struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
{
	struct net_device *dev = data;

	return upper_dev == dev;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks only the immediate upper
 * device, not the complete stack of devices. The caller must hold the
 * RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					     upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks the entire upper device chain.
 * The caller must hold the RCU read lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					       upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);

static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *udev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.upper,
	     udev = netdev_next_upper_dev_rcu(dev, &iter);
	     udev;
	     udev = netdev_next_upper_dev_rcu(dev, &iter)) {
		/* first is the upper device itself */
		ret = fn(udev, data);
		if (ret)
			return ret;

		/* then look at all of its upper devices */
		ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);

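/* Illustrative sketch (not from the original source): the walker invokes
 * fn for each of @dev's direct uppers and recurses depth-first, stopping
 * early as soon as fn returns nonzero. Counting every reachable upper
 * device with a callback, where __count_upper and the int accumulator are
 * hypothetical:
 *
 *	static int __count_upper(struct net_device *upper, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *
 *	rcu_read_lock();
 *	netdev_walk_all_upper_dev_rcu(dev, __count_upper, &n);
 *	rcu_read_unlock();
 *
 * Note that a device reachable through two paths is visited once per
 * path; the walk does not de-duplicate.
 */
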
/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);

static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					void *data),
			      void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev_rcu(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}
static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr += 1;
		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);

		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info)
{
	struct netdev_notifier_changeupper_info changeupper_info;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (netdev_has_upper_dev(upper_dev, dev))
		return -EBUSY;

	if (netdev_has_upper_dev(dev, upper_dev))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = master;
	changeupper_info.linking = true;
	changeupper_info.upper_info = upper_info;

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback;

	return 0;

rollback:
	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info)
{
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);

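/* Illustrative sketch (not from the original source): a bonding/team-style
 * driver would enslave a lower device under RTNL roughly like this, where
 * bond_dev, slave_dev, slave_priv and lag_info are all hypothetical:
 *
 *	ASSERT_RTNL();
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev,
 *					   slave_priv, &lag_info);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(slave_dev, bond_dev);	(on release)
 *
 * Notifier listeners (e.g. switch drivers) may veto the link from
 * NETDEV_PRECHANGEUPPER, which is why the call can fail.
 */
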
/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info;

	ASSERT_RTNL();

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
	changeupper_info.linking = false;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
				      &changeupper_info.info);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info;

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);

static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

6384void *netdev_lower_dev_get_private(struct net_device *dev,
6385 struct net_device *lower_dev)
6386{
6387 struct netdev_adjacent *lower;
6388
6389 if (!lower_dev)
6390 return NULL;
Michal Kubeček6ea29da2015-09-24 10:59:05 +02006391 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
Veaceslav Falico402dae92013-09-25 09:20:09 +02006392 if (!lower)
6393 return NULL;
6394
6395 return lower->private;
6396}
6397EXPORT_SYMBOL(netdev_lower_dev_get_private);
6398
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006399
Sabrina Dubroca952fcfd2016-08-12 16:10:33 +02006400int dev_get_nest_level(struct net_device *dev)
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006401{
6402 struct net_device *lower = NULL;
6403 struct list_head *iter;
6404 int max_nest = -1;
6405 int nest;
6406
6407 ASSERT_RTNL();
6408
6409 netdev_for_each_lower_dev(dev, lower, iter) {
Sabrina Dubroca952fcfd2016-08-12 16:10:33 +02006410 nest = dev_get_nest_level(lower);
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006411 if (max_nest < nest)
6412 max_nest = nest;
6413 }
6414
Sabrina Dubroca952fcfd2016-08-12 16:10:33 +02006415 return max_nest + 1;
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04006416}
6417EXPORT_SYMBOL(dev_get_nest_level);
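
/*
 * Illustrative sketch (editor's addition): stacked drivers such as
 * macvlan record their nesting depth at link time and report it as a
 * lockdep subclass, so taking one addr_list_lock per stacking level is
 * not flagged as recursive locking. The names here are hypothetical.
 *
 *	priv->nest_level = dev_get_nest_level(lower_dev) + 1;
 *
 *	static int mystack_get_lock_subclass(struct net_device *dev)
 *	{
 *		return mystack_priv(dev)->nest_level;
 *	}
 */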
6418
Jiri Pirko04d48262015-12-03 12:12:15 +01006419/**
6420 * netdev_lower_change - Dispatch event about lower device state change
6421 * @lower_dev: device
6422 * @lower_state_info: state to dispatch
6423 *
6424 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
6425 * The caller must hold the RTNL lock.
6426 */
6427void netdev_lower_state_changed(struct net_device *lower_dev,
6428 void *lower_state_info)
6429{
6430 struct netdev_notifier_changelowerstate_info changelowerstate_info;
6431
6432 ASSERT_RTNL();
6433 changelowerstate_info.lower_state_info = lower_state_info;
6434 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
6435 &changelowerstate_info.info);
6436}
6437EXPORT_SYMBOL(netdev_lower_state_changed);
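
/*
 * Illustrative sketch (editor's addition): a master driver such as
 * bonding or team reports the link/tx state of one of its ports this
 * way; the fields are those of struct netdev_lower_state_info and the
 * surrounding function is hypothetical. RTNL must be held.
 *
 *	static void mymaster_port_state(struct net_device *port_dev,
 *					bool link_up, bool tx_enabled)
 *	{
 *		struct netdev_lower_state_info info = {
 *			.link_up    = link_up,
 *			.tx_enabled = tx_enabled,
 *		};
 *
 *		netdev_lower_state_changed(port_dev, &info);
 *	}
 */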
6438
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006439static void dev_change_rx_flags(struct net_device *dev, int flags)
6440{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006441 const struct net_device_ops *ops = dev->netdev_ops;
6442
Vlad Yasevichd2615bf2013-11-19 20:47:15 -05006443 if (ops->ndo_change_rx_flags)
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006444 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006445}
6446
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006447static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
Patrick McHardy4417da62007-06-27 01:28:10 -07006448{
Eric Dumazetb536db92011-11-30 21:42:26 +00006449 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06006450 kuid_t uid;
6451 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07006452
Patrick McHardy24023452007-07-14 18:51:31 -07006453 ASSERT_RTNL();
6454
Wang Chendad9b332008-06-18 01:48:28 -07006455 dev->flags |= IFF_PROMISC;
6456 dev->promiscuity += inc;
6457 if (dev->promiscuity == 0) {
6458 /*
6459 * Avoid overflow.
6460	 * If inc causes an overflow, leave promiscuity untouched and return an error.
6461 */
6462 if (inc < 0)
6463 dev->flags &= ~IFF_PROMISC;
6464 else {
6465 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006466			pr_warn("%s: promiscuity counter overflow, set promiscuity failed. Promiscuous mode on this device may be broken.\n",
6467 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07006468 return -EOVERFLOW;
6469 }
6470 }
Patrick McHardy4417da62007-06-27 01:28:10 -07006471 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006472 pr_info("device %s %s promiscuous mode\n",
6473 dev->name,
6474 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11006475 if (audit_enabled) {
6476 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05006477 audit_log(current->audit_context, GFP_ATOMIC,
6478 AUDIT_ANOM_PROMISCUOUS,
6479 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6480 dev->name, (dev->flags & IFF_PROMISC),
6481 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07006482 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06006483 from_kuid(&init_user_ns, uid),
6484 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05006485 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11006486 }
Patrick McHardy24023452007-07-14 18:51:31 -07006487
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006488 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07006489 }
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006490 if (notify)
6491 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
Wang Chendad9b332008-06-18 01:48:28 -07006492 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07006493}
6494
Linus Torvalds1da177e2005-04-16 15:20:36 -07006495/**
6496 * dev_set_promiscuity - update promiscuity count on a device
6497 * @dev: device
6498 * @inc: modifier
6499 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07006500 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006501 * remains above zero the interface remains promiscuous. Once it hits zero
6502 *	the device reverts to normal filtering operation. A negative @inc
6503 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07006504 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006505 */
Wang Chendad9b332008-06-18 01:48:28 -07006506int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006507{
Eric Dumazetb536db92011-11-30 21:42:26 +00006508 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07006509 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006510
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006511 err = __dev_set_promiscuity(dev, inc, true);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07006512 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07006513 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07006514 if (dev->flags != old_flags)
6515 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07006516 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006517}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006518EXPORT_SYMBOL(dev_set_promiscuity);
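
/*
 * Illustrative sketch (editor's addition): a hypothetical capture
 * feature takes a promiscuity reference while active and drops it on
 * stop. RTNL must be held around both calls; the function names are
 * made up for the example.
 *
 *	static int mydrv_capture_start(struct net_device *dev)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_set_promiscuity(dev, 1);
 *		rtnl_unlock();
 *		return err;
 *	}
 *
 *	static void mydrv_capture_stop(struct net_device *dev)
 *	{
 *		rtnl_lock();
 *		dev_set_promiscuity(dev, -1);
 *		rtnl_unlock();
 *	}
 */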
Linus Torvalds1da177e2005-04-16 15:20:36 -07006519
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006520static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006521{
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006522 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006523
Patrick McHardy24023452007-07-14 18:51:31 -07006524 ASSERT_RTNL();
6525
Linus Torvalds1da177e2005-04-16 15:20:36 -07006526 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07006527 dev->allmulti += inc;
6528 if (dev->allmulti == 0) {
6529 /*
6530 * Avoid overflow.
6531		 * If inc causes an overflow, leave allmulti untouched and return an error.
6532 */
6533 if (inc < 0)
6534 dev->flags &= ~IFF_ALLMULTI;
6535 else {
6536 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006537			pr_warn("%s: allmulti counter overflow, set allmulti failed. Allmulti on this device may be broken.\n",
6538 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07006539 return -EOVERFLOW;
6540 }
6541 }
Patrick McHardy24023452007-07-14 18:51:31 -07006542 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006543 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07006544 dev_set_rx_mode(dev);
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006545 if (notify)
6546 __dev_notify_flags(dev, old_flags,
6547 dev->gflags ^ old_gflags);
Patrick McHardy24023452007-07-14 18:51:31 -07006548 }
Wang Chendad9b332008-06-18 01:48:28 -07006549 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07006550}
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006551
6552/**
6553 * dev_set_allmulti - update allmulti count on a device
6554 * @dev: device
6555 * @inc: modifier
6556 *
6557 *	Add or remove reception of all multicast frames on a device. While the
6558 *	count in the device remains above zero the interface remains listening
6559 *	to all multicast frames. Once it hits zero the device reverts to normal
6560 * filtering operation. A negative @inc value is used to drop the counter
6561 * when releasing a resource needing all multicasts.
6562 * Return 0 if successful or a negative errno code on error.
6563 */
6564
6565int dev_set_allmulti(struct net_device *dev, int inc)
6566{
6567 return __dev_set_allmulti(dev, inc, true);
6568}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006569EXPORT_SYMBOL(dev_set_allmulti);
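
/*
 * Illustrative sketch (editor's addition): a protocol that must see
 * every multicast frame while it has listeners can hold an allmulti
 * reference, mirroring the promiscuity example above. Function names
 * are hypothetical; RTNL must be held.
 *
 *	static int myproto_listen_start(struct net_device *dev)
 *	{
 *		ASSERT_RTNL();
 *		return dev_set_allmulti(dev, 1);
 *	}
 *
 *	static void myproto_listen_stop(struct net_device *dev)
 *	{
 *		ASSERT_RTNL();
 *		dev_set_allmulti(dev, -1);
 *	}
 */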
Patrick McHardy4417da62007-06-27 01:28:10 -07006570
6571/*
6572 * Upload unicast and multicast address lists to device and
6573 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08006574 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07006575 * are present.
6576 */
6577void __dev_set_rx_mode(struct net_device *dev)
6578{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006579 const struct net_device_ops *ops = dev->netdev_ops;
6580
Patrick McHardy4417da62007-06-27 01:28:10 -07006581 /* dev_open will call this function so the list will stay sane. */
6582 if (!(dev->flags&IFF_UP))
6583 return;
6584
6585 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09006586 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07006587
Jiri Pirko01789342011-08-16 06:29:00 +00006588 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07006589 /* Unicast addresses changes may only happen under the rtnl,
6590 * therefore calling __dev_set_promiscuity here is safe.
6591 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08006592 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006593 __dev_set_promiscuity(dev, 1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07006594 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08006595 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006596 __dev_set_promiscuity(dev, -1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07006597 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07006598 }
Patrick McHardy4417da62007-06-27 01:28:10 -07006599 }
Jiri Pirko01789342011-08-16 06:29:00 +00006600
6601 if (ops->ndo_set_rx_mode)
6602 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07006603}
6604
6605void dev_set_rx_mode(struct net_device *dev)
6606{
David S. Millerb9e40852008-07-15 00:15:08 -07006607 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07006608 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07006609 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006610}
6611
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006612/**
6613 * dev_get_flags - get flags reported to userspace
6614 * @dev: device
6615 *
6616 * Get the combination of flag bits exported through APIs to userspace.
6617 */
Eric Dumazet95c96172012-04-15 05:58:06 +00006618unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006619{
Eric Dumazet95c96172012-04-15 05:58:06 +00006620 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006621
6622 flags = (dev->flags & ~(IFF_PROMISC |
6623 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08006624 IFF_RUNNING |
6625 IFF_LOWER_UP |
6626 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07006627 (dev->gflags & (IFF_PROMISC |
6628 IFF_ALLMULTI));
6629
Stefan Rompfb00055a2006-03-20 17:09:11 -08006630 if (netif_running(dev)) {
6631 if (netif_oper_up(dev))
6632 flags |= IFF_RUNNING;
6633 if (netif_carrier_ok(dev))
6634 flags |= IFF_LOWER_UP;
6635 if (netif_dormant(dev))
6636 flags |= IFF_DORMANT;
6637 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006638
6639 return flags;
6640}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006641EXPORT_SYMBOL(dev_get_flags);
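
/*
 * Illustrative sketch (editor's addition): the returned value merges
 * dev->flags with the operstate-derived bits, so callers can test
 * IFF_RUNNING and IFF_LOWER_UP exactly as userspace sees them via
 * SIOCGIFFLAGS:
 *
 *	unsigned int flags = dev_get_flags(dev);
 *	bool oper_up = (flags & (IFF_UP | IFF_RUNNING)) ==
 *		       (IFF_UP | IFF_RUNNING);
 */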
Linus Torvalds1da177e2005-04-16 15:20:36 -07006642
Patrick McHardybd380812010-02-26 06:34:53 +00006643int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006644{
Eric Dumazetb536db92011-11-30 21:42:26 +00006645 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00006646 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006647
Patrick McHardy24023452007-07-14 18:51:31 -07006648 ASSERT_RTNL();
6649
Linus Torvalds1da177e2005-04-16 15:20:36 -07006650 /*
6651 * Set the flags on our device.
6652 */
6653
6654 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6655 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6656 IFF_AUTOMEDIA)) |
6657 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6658 IFF_ALLMULTI));
6659
6660 /*
6661 * Load in the correct multicast list now the flags have changed.
6662 */
6663
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006664 if ((old_flags ^ flags) & IFF_MULTICAST)
6665 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07006666
Patrick McHardy4417da62007-06-27 01:28:10 -07006667 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006668
6669 /*
6670	 *	Have we downed the interface? We handle IFF_UP ourselves
6671 * according to user attempts to set it, rather than blindly
6672 * setting it.
6673 */
6674
6675 ret = 0;
Peter Pan(潘卫平)d215d102014-06-16 21:57:22 +08006676 if ((old_flags ^ flags) & IFF_UP)
Patrick McHardybd380812010-02-26 06:34:53 +00006677 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006678
Linus Torvalds1da177e2005-04-16 15:20:36 -07006679 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006680 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006681 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006682
Linus Torvalds1da177e2005-04-16 15:20:36 -07006683 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006684
6685 if (__dev_set_promiscuity(dev, inc, false) >= 0)
6686 if (dev->flags != old_flags)
6687 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006688 }
6689
6690 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
tchardingeb13da12017-02-09 17:56:06 +11006691	 * is important. Some (broken) drivers set IFF_PROMISC when
6692	 * IFF_ALLMULTI is requested, without asking us and without reporting it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006693 */
6694 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006695 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6696
Linus Torvalds1da177e2005-04-16 15:20:36 -07006697 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006698 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006699 }
6700
Patrick McHardybd380812010-02-26 06:34:53 +00006701 return ret;
6702}
6703
Nicolas Dichtela528c212013-09-25 12:02:44 +02006704void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6705 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00006706{
6707 unsigned int changes = dev->flags ^ old_flags;
6708
Nicolas Dichtela528c212013-09-25 12:02:44 +02006709 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006710 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02006711
Patrick McHardybd380812010-02-26 06:34:53 +00006712 if (changes & IFF_UP) {
6713 if (dev->flags & IFF_UP)
6714 call_netdevice_notifiers(NETDEV_UP, dev);
6715 else
6716 call_netdevice_notifiers(NETDEV_DOWN, dev);
6717 }
6718
6719 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00006720 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
6721 struct netdev_notifier_change_info change_info;
6722
6723 change_info.flags_changed = changes;
6724 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
6725 &change_info.info);
6726 }
Patrick McHardybd380812010-02-26 06:34:53 +00006727}
6728
6729/**
6730 * dev_change_flags - change device settings
6731 * @dev: device
6732 * @flags: device state flags
6733 *
6734 *	Change settings on a device based on state flags. The flags are
6735 * in the userspace exported format.
6736 */
Eric Dumazetb536db92011-11-30 21:42:26 +00006737int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00006738{
Eric Dumazetb536db92011-11-30 21:42:26 +00006739 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006740 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00006741
6742 ret = __dev_change_flags(dev, flags);
6743 if (ret < 0)
6744 return ret;
6745
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006746 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02006747 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006748 return ret;
6749}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006750EXPORT_SYMBOL(dev_change_flags);
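
/*
 * Illustrative sketch (editor's addition): bringing an interface up
 * roughly the way the SIOCSIFFLAGS ioctl path does, by reading the
 * userspace-format flags and writing them back with IFF_UP set. RTNL
 * must be held; the helper name is hypothetical.
 *
 *	static int bring_up(struct net_device *dev)
 *	{
 *		ASSERT_RTNL();
 *		return dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	}
 */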
Linus Torvalds1da177e2005-04-16 15:20:36 -07006751
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006752static int __dev_set_mtu(struct net_device *dev, int new_mtu)
6753{
6754 const struct net_device_ops *ops = dev->netdev_ops;
6755
6756 if (ops->ndo_change_mtu)
6757 return ops->ndo_change_mtu(dev, new_mtu);
6758
6759 dev->mtu = new_mtu;
6760 return 0;
6761}
6762
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006763/**
6764 * dev_set_mtu - Change maximum transfer unit
6765 * @dev: device
6766 * @new_mtu: new transfer unit
6767 *
6768 * Change the maximum transfer size of the network device.
6769 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006770int dev_set_mtu(struct net_device *dev, int new_mtu)
6771{
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006772 int err, orig_mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006773
6774 if (new_mtu == dev->mtu)
6775 return 0;
6776
Jarod Wilson61e84622016-10-07 22:04:33 -04006777 /* MTU must be positive, and in range */
6778 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
6779 net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
6780 dev->name, new_mtu, dev->min_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006781 return -EINVAL;
Jarod Wilson61e84622016-10-07 22:04:33 -04006782 }
6783
6784 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
6785 net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
Jakub Kicinskia0e65de2016-10-17 18:02:22 +01006786 dev->name, new_mtu, dev->max_mtu);
Jarod Wilson61e84622016-10-07 22:04:33 -04006787 return -EINVAL;
6788 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006789
6790 if (!netif_device_present(dev))
6791 return -ENODEV;
6792
Veaceslav Falico1d486bf2014-01-16 00:02:18 +01006793 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6794 err = notifier_to_errno(err);
6795 if (err)
6796 return err;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006797
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006798 orig_mtu = dev->mtu;
6799 err = __dev_set_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006800
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006801 if (!err) {
6802 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6803 err = notifier_to_errno(err);
6804 if (err) {
6805 /* setting mtu back and notifying everyone again,
6806 * so that they have a chance to revert changes.
6807 */
6808 __dev_set_mtu(dev, orig_mtu);
6809 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6810 }
6811 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006812 return err;
6813}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006814EXPORT_SYMBOL(dev_set_mtu);
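
/*
 * Illustrative sketch (editor's addition): switching a device to jumbo
 * frames from a management path. RTNL must be held; 9000 is only an
 * example value and is still checked against dev->min_mtu/dev->max_mtu
 * above.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 */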
Linus Torvalds1da177e2005-04-16 15:20:36 -07006815
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006816/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006817 * dev_set_group - Change group this device belongs to
6818 * @dev: device
6819 * @new_group: group this device should belong to
6820 */
6821void dev_set_group(struct net_device *dev, int new_group)
6822{
6823 dev->group = new_group;
6824}
6825EXPORT_SYMBOL(dev_set_group);
6826
6827/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006828 * dev_set_mac_address - Change Media Access Control Address
6829 * @dev: device
6830 * @sa: new address
6831 *
6832 * Change the hardware (MAC) address of the device
6833 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006834int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6835{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006836 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006837 int err;
6838
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006839 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006840 return -EOPNOTSUPP;
6841 if (sa->sa_family != dev->type)
6842 return -EINVAL;
6843 if (!netif_device_present(dev))
6844 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006845 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00006846 if (err)
6847 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00006848 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00006849 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006850 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00006851 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006852}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006853EXPORT_SYMBOL(dev_set_mac_address);
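
/*
 * Illustrative sketch (editor's addition): setting a new hardware
 * address under RTNL. sa_family must match dev->type, which is why the
 * raw address is wrapped in a sockaddr; the helper name is made up.
 *
 *	static int set_mac(struct net_device *dev, const u8 *addr)
 *	{
 *		struct sockaddr sa;
 *
 *		sa.sa_family = dev->type;
 *		memcpy(sa.sa_data, addr, dev->addr_len);
 *		ASSERT_RTNL();
 *		return dev_set_mac_address(dev, &sa);
 *	}
 */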
Linus Torvalds1da177e2005-04-16 15:20:36 -07006854
Jiri Pirko4bf84c32012-12-27 23:49:37 +00006855/**
6856 * dev_change_carrier - Change device carrier
6857 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00006858 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00006859 *
6860 * Change device carrier
6861 */
6862int dev_change_carrier(struct net_device *dev, bool new_carrier)
6863{
6864 const struct net_device_ops *ops = dev->netdev_ops;
6865
6866 if (!ops->ndo_change_carrier)
6867 return -EOPNOTSUPP;
6868 if (!netif_device_present(dev))
6869 return -ENODEV;
6870 return ops->ndo_change_carrier(dev, new_carrier);
6871}
6872EXPORT_SYMBOL(dev_change_carrier);
6873
Linus Torvalds1da177e2005-04-16 15:20:36 -07006874/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02006875 * dev_get_phys_port_id - Get device physical port ID
6876 * @dev: device
6877 * @ppid: port ID
6878 *
6879 * Get device physical port ID
6880 */
6881int dev_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01006882 struct netdev_phys_item_id *ppid)
Jiri Pirko66b52b02013-07-29 18:16:49 +02006883{
6884 const struct net_device_ops *ops = dev->netdev_ops;
6885
6886 if (!ops->ndo_get_phys_port_id)
6887 return -EOPNOTSUPP;
6888 return ops->ndo_get_phys_port_id(dev, ppid);
6889}
6890EXPORT_SYMBOL(dev_get_phys_port_id);
6891
6892/**
David Aherndb24a902015-03-17 20:23:15 -06006893 * dev_get_phys_port_name - Get device physical port name
6894 * @dev: device
6895 * @name: port name
Luis de Bethencourted49e652016-03-21 16:31:14 +00006896 * @len: limit of bytes to copy to name
David Aherndb24a902015-03-17 20:23:15 -06006897 *
6898 * Get device physical port name
6899 */
6900int dev_get_phys_port_name(struct net_device *dev,
6901 char *name, size_t len)
6902{
6903 const struct net_device_ops *ops = dev->netdev_ops;
6904
6905 if (!ops->ndo_get_phys_port_name)
6906 return -EOPNOTSUPP;
6907 return ops->ndo_get_phys_port_name(dev, name, len);
6908}
6909EXPORT_SYMBOL(dev_get_phys_port_name);
6910
6911/**
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07006912 * dev_change_proto_down - update protocol port state information
6913 * @dev: device
6914 * @proto_down: new value
6915 *
6916 * This info can be used by switch drivers to set the phys state of the
6917 * port.
6918 */
6919int dev_change_proto_down(struct net_device *dev, bool proto_down)
6920{
6921 const struct net_device_ops *ops = dev->netdev_ops;
6922
6923 if (!ops->ndo_change_proto_down)
6924 return -EOPNOTSUPP;
6925 if (!netif_device_present(dev))
6926 return -ENODEV;
6927 return ops->ndo_change_proto_down(dev, proto_down);
6928}
6929EXPORT_SYMBOL(dev_change_proto_down);
6930
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02006931bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op)
6932{
6933 struct netdev_xdp xdp;
6934
6935 memset(&xdp, 0, sizeof(xdp));
6936 xdp.command = XDP_QUERY_PROG;
6937
6938 /* Query must always succeed. */
6939 WARN_ON(xdp_op(dev, &xdp) < 0);
6940 return xdp.prog_attached;
6941}
6942
6943static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
6944 struct netlink_ext_ack *extack,
6945 struct bpf_prog *prog)
6946{
6947 struct netdev_xdp xdp;
6948
6949 memset(&xdp, 0, sizeof(xdp));
6950 xdp.command = XDP_SETUP_PROG;
6951 xdp.extack = extack;
6952 xdp.prog = prog;
6953
6954 return xdp_op(dev, &xdp);
6955}
6956
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07006957/**
Brenden Blancoa7862b42016-07-19 12:16:48 -07006958 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
6959 * @dev: device
Jakub Kicinskib5d60982017-05-01 15:53:43 -07006960 * @extack: netlink extended ack
Brenden Blancoa7862b42016-07-19 12:16:48 -07006961 * @fd: new program fd or negative value to clear
Daniel Borkmann85de8572016-11-28 23:16:54 +01006962 * @flags: xdp-related flags
Brenden Blancoa7862b42016-07-19 12:16:48 -07006963 *
6964 * Set or clear a bpf program for a device
6965 */
Jakub Kicinskiddf9f972017-04-30 21:46:46 -07006966int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
6967 int fd, u32 flags)
Brenden Blancoa7862b42016-07-19 12:16:48 -07006968{
6969 const struct net_device_ops *ops = dev->netdev_ops;
6970 struct bpf_prog *prog = NULL;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02006971 xdp_op_t xdp_op, xdp_chk;
Brenden Blancoa7862b42016-07-19 12:16:48 -07006972 int err;
6973
Daniel Borkmann85de8572016-11-28 23:16:54 +01006974 ASSERT_RTNL();
6975
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02006976 xdp_op = xdp_chk = ops->ndo_xdp;
Daniel Borkmann0489df92017-05-12 01:04:45 +02006977 if (!xdp_op && (flags & XDP_FLAGS_DRV_MODE))
6978 return -EOPNOTSUPP;
David S. Millerb5cdae32017-04-18 15:36:58 -04006979 if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
6980 xdp_op = generic_xdp_install;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02006981 if (xdp_op == xdp_chk)
6982 xdp_chk = generic_xdp_install;
David S. Millerb5cdae32017-04-18 15:36:58 -04006983
Brenden Blancoa7862b42016-07-19 12:16:48 -07006984 if (fd >= 0) {
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02006985 if (xdp_chk && __dev_xdp_attached(dev, xdp_chk))
6986 return -EEXIST;
6987 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
6988 __dev_xdp_attached(dev, xdp_op))
6989 return -EBUSY;
Daniel Borkmann85de8572016-11-28 23:16:54 +01006990
Brenden Blancoa7862b42016-07-19 12:16:48 -07006991 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
6992 if (IS_ERR(prog))
6993 return PTR_ERR(prog);
6994 }
6995
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02006996 err = dev_xdp_install(dev, xdp_op, extack, prog);
Brenden Blancoa7862b42016-07-19 12:16:48 -07006997 if (err < 0 && prog)
6998 bpf_prog_put(prog);
6999
7000 return err;
7001}
Brenden Blancoa7862b42016-07-19 12:16:48 -07007002
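/*
 * Illustrative sketch (editor's addition): this is the entry point used
 * by the rtnetlink IFLA_XDP handling. Attaching a program fd in the
 * generic (skb-mode) path looks roughly like what do_setlink() does:
 *
 *	ASSERT_RTNL();
 *	err = dev_change_xdp_fd(dev, extack, prog_fd, XDP_FLAGS_SKB_MODE);
 *
 * and passing a negative fd with the same flags detaches the program.
 */
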
7003/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007004 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07007005 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07007006 *
7007 * Returns a suitable unique value for a new device interface
7008 * number. The caller must hold the rtnl semaphore or the
7009 * dev_base_lock to be sure it remains unique.
7010 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07007011static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007012{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00007013 int ifindex = net->ifindex;
tchardingf4563a72017-02-09 17:56:07 +11007014
Linus Torvalds1da177e2005-04-16 15:20:36 -07007015 for (;;) {
7016 if (++ifindex <= 0)
7017 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007018 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00007019 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007020 }
7021}
7022
Linus Torvalds1da177e2005-04-16 15:20:36 -07007023/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08007024static LIST_HEAD(net_todo_list);
Cong Wang200b9162014-05-12 15:11:20 -07007025DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007026
Stephen Hemminger6f05f622007-03-08 20:46:03 -08007027static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007028{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007029 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007030 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007031}
7032
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007033static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007034{
Krishna Kumare93737b2009-12-08 22:26:02 +00007035 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07007036 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007037
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007038 BUG_ON(dev_boot_phase);
7039 ASSERT_RTNL();
7040
Krishna Kumare93737b2009-12-08 22:26:02 +00007041 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007042		/* Some devices get unregistered without ever having been
Krishna Kumare93737b2009-12-08 22:26:02 +00007043		 * registered, as part of initialization unwind. Remove those
7044		 * devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007045 */
7046 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007047 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
7048 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007049
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007050 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00007051 list_del(&dev->unreg_list);
7052 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007053 }
Eric Dumazet449f4542011-05-19 12:24:16 +00007054 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007055 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00007056 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007057
Octavian Purdila44345722010-12-13 12:44:07 +00007058 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07007059 list_for_each_entry(dev, head, unreg_list)
7060 list_add_tail(&dev->close_list, &close_head);
David S. Miller99c4a262015-03-18 22:52:33 -04007061 dev_close_many(&close_head, true);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007062
Octavian Purdila44345722010-12-13 12:44:07 +00007063 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007064 /* And unlink it from device chain. */
7065 unlist_netdevice(dev);
7066
7067 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007068 }
Eric Dumazet41852492016-08-26 12:50:39 -07007069 flush_all_backlogs();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007070
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007071 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007072
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007073 list_for_each_entry(dev, head, unreg_list) {
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007074 struct sk_buff *skb = NULL;
7075
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007076 /* Shutdown queueing discipline. */
7077 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007078
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007079
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007080		/* Notify protocols that we are about to destroy
tchardingeb13da12017-02-09 17:56:06 +11007081		 * this device. They should clean up all their state.
7082 */
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007083 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7084
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007085 if (!dev->rtnl_link_ops ||
7086 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Vlad Yasevich3d3ea5a2017-05-27 10:14:34 -04007087 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007088 GFP_KERNEL);
7089
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007090 /*
7091 * Flush the unicast and multicast chains
7092 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007093 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00007094 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007095
7096 if (dev->netdev_ops->ndo_uninit)
7097 dev->netdev_ops->ndo_uninit(dev);
7098
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007099 if (skb)
7100 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
Roopa Prabhu56bfa7e2014-05-01 11:40:30 -07007101
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007102 /* Notifier chain MUST detach us all upper devices. */
7103 WARN_ON(netdev_has_any_upper_dev(dev));
David Ahern0f524a82016-10-17 19:15:52 -07007104 WARN_ON(netdev_has_any_lower_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007105
7106 /* Remove entries from kobject tree */
7107 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00007108#ifdef CONFIG_XPS
7109 /* Remove XPS queueing entries */
7110 netif_reset_xps_queues_gt(dev, 0);
7111#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007112 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007113
Eric W. Biederman850a5452011-10-13 22:25:23 +00007114 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007115
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00007116 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007117 dev_put(dev);
7118}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007119
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007120static void rollback_registered(struct net_device *dev)
7121{
7122 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007123
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007124 list_add(&dev->unreg_list, &single);
7125 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00007126 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007127}
7128
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007129static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
7130 struct net_device *upper, netdev_features_t features)
7131{
7132 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7133 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007134 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007135
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007136 for_each_netdev_feature(&upper_disables, feature_bit) {
7137 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007138 if (!(upper->wanted_features & feature)
7139 && (features & feature)) {
7140 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
7141 &feature, upper->name);
7142 features &= ~feature;
7143 }
7144 }
7145
7146 return features;
7147}
7148
7149static void netdev_sync_lower_features(struct net_device *upper,
7150 struct net_device *lower, netdev_features_t features)
7151{
7152 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7153 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007154 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007155
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007156 for_each_netdev_feature(&upper_disables, feature_bit) {
7157 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007158 if (!(features & feature) && (lower->features & feature)) {
7159 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
7160 &feature, lower->name);
7161 lower->wanted_features &= ~feature;
7162 netdev_update_features(lower);
7163
7164 if (unlikely(lower->features & feature))
7165 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
7166 &feature, lower->name);
7167 }
7168 }
7169}
7170
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007171static netdev_features_t netdev_fix_features(struct net_device *dev,
7172 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07007173{
Michał Mirosław57422dc2011-01-22 12:14:12 +00007174 /* Fix illegal checksum combinations */
7175 if ((features & NETIF_F_HW_CSUM) &&
7176 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007177 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00007178 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
7179 }
7180
Herbert Xub63365a2008-10-23 01:11:29 -07007181 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00007182 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007183 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00007184 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07007185 }
7186
Pravin B Shelarec5f0612013-03-07 09:28:01 +00007187 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
7188 !(features & NETIF_F_IP_CSUM)) {
7189 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
7190 features &= ~NETIF_F_TSO;
7191 features &= ~NETIF_F_TSO_ECN;
7192 }
7193
7194 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
7195 !(features & NETIF_F_IPV6_CSUM)) {
7196 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
7197 features &= ~NETIF_F_TSO6;
7198 }
7199
Alexander Duyckb1dc4972016-05-02 09:38:24 -07007200 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
7201 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
7202 features &= ~NETIF_F_TSO_MANGLEID;
7203
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00007204 /* TSO ECN requires that TSO is present as well. */
7205 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
7206 features &= ~NETIF_F_TSO_ECN;
7207
Michał Mirosław212b5732011-02-15 16:59:16 +00007208 /* Software GSO depends on SG. */
7209 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007210 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00007211 features &= ~NETIF_F_GSO;
7212 }
7213
Michał Mirosławacd11302011-01-24 15:45:15 -08007214 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07007215 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00007216 /* maybe split UFO into V4 and V6? */
Tom Herbertc8cd0982015-12-14 11:19:44 -08007217 if (!(features & NETIF_F_HW_CSUM) &&
7218 ((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
7219 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007220 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08007221 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07007222 features &= ~NETIF_F_UFO;
7223 }
7224
7225 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007226 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08007227 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07007228 features &= ~NETIF_F_UFO;
7229 }
7230 }
7231
Alexander Duyck802ab552016-04-10 21:45:03 -04007232 /* GSO partial features require GSO partial be set */
7233 if ((features & dev->gso_partial_features) &&
7234 !(features & NETIF_F_GSO_PARTIAL)) {
7235 netdev_dbg(dev,
7236 "Dropping partially supported GSO features since no GSO partial.\n");
7237 features &= ~dev->gso_partial_features;
7238 }
7239
Herbert Xub63365a2008-10-23 01:11:29 -07007240 return features;
7241}
Herbert Xub63365a2008-10-23 01:11:29 -07007242
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007243int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00007244{
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007245 struct net_device *upper, *lower;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007246 netdev_features_t features;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007247 struct list_head *iter;
Jarod Wilsone7868a82015-11-03 23:09:32 -05007248 int err = -1;
Michał Mirosław5455c692011-02-15 16:59:17 +00007249
Michał Mirosław87267482011-04-12 09:56:38 +00007250 ASSERT_RTNL();
7251
Michał Mirosław5455c692011-02-15 16:59:17 +00007252 features = netdev_get_wanted_features(dev);
7253
7254 if (dev->netdev_ops->ndo_fix_features)
7255 features = dev->netdev_ops->ndo_fix_features(dev, features);
7256
7257 /* driver might be less strict about feature dependencies */
7258 features = netdev_fix_features(dev, features);
7259
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007260	/* some features can't be enabled if they're off on an upper device */
7261 netdev_for_each_upper_dev_rcu(dev, upper, iter)
7262 features = netdev_sync_upper_features(dev, upper, features);
7263
Michał Mirosław5455c692011-02-15 16:59:17 +00007264 if (dev->features == features)
Jarod Wilsone7868a82015-11-03 23:09:32 -05007265 goto sync_lower;
Michał Mirosław5455c692011-02-15 16:59:17 +00007266
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007267 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
7268 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00007269
7270 if (dev->netdev_ops->ndo_set_features)
7271 err = dev->netdev_ops->ndo_set_features(dev, features);
Nikolay Aleksandrov5f8dc332015-11-13 14:54:01 +01007272 else
7273 err = 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00007274
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007275 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00007276 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007277 "set_features() failed (%d); wanted %pNF, left %pNF\n",
7278 err, &features, &dev->features);
Nikolay Aleksandrov17b85d22015-11-17 15:49:06 +01007279 /* return non-0 since some features might have changed and
7280 * it's better to fire a spurious notification than miss it
7281 */
7282 return -1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007283 }
7284
Jarod Wilsone7868a82015-11-03 23:09:32 -05007285sync_lower:
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007286 /* some features must be disabled on lower devices when disabled
7287 * on an upper device (think: bonding master or bridge)
7288 */
7289 netdev_for_each_lower_dev(dev, lower, iter)
7290 netdev_sync_lower_features(dev, lower, features);
7291
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007292 if (!err)
7293 dev->features = features;
7294
Jarod Wilsone7868a82015-11-03 23:09:32 -05007295 return err < 0 ? 0 : 1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007296}
7297
Michał Mirosławafe12cc2011-05-07 03:22:17 +00007298/**
7299 * netdev_update_features - recalculate device features
7300 * @dev: the device to check
7301 *
7302 * Recalculate dev->features set and send notifications if it
7303 * has changed. Should be called after driver or hardware dependent
7304 * conditions might have changed that influence the features.
7305 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007306void netdev_update_features(struct net_device *dev)
7307{
7308 if (__netdev_update_features(dev))
7309 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00007310}
7311EXPORT_SYMBOL(netdev_update_features);
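
/*
 * Illustrative sketch (editor's addition): a driver whose checksum
 * offload only works below some hardware-specific MTU might re-evaluate
 * its features from ndo_change_mtu() (RTNL is already held there). The
 * driver callback is hypothetical.
 *
 *	static int mydrv_change_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		dev->mtu = new_mtu;
 *		netdev_update_features(dev);
 *		return 0;
 *	}
 *
 * The driver's ndo_fix_features() would then drop the offload bits
 * whenever dev->mtu is above its limit.
 */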
7312
Linus Torvalds1da177e2005-04-16 15:20:36 -07007313/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00007314 * netdev_change_features - recalculate device features
7315 * @dev: the device to check
7316 *
7317 * Recalculate dev->features set and send notifications even
7318 * if they have not changed. Should be called instead of
7319 * netdev_update_features() if also dev->vlan_features might
7320 * have changed to allow the changes to be propagated to stacked
7321 * VLAN devices.
7322 */
7323void netdev_change_features(struct net_device *dev)
7324{
7325 __netdev_update_features(dev);
7326 netdev_features_change(dev);
7327}
7328EXPORT_SYMBOL(netdev_change_features);
7329
7330/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007331 * netif_stacked_transfer_operstate - transfer operstate
7332 * @rootdev: the root or lower level device to transfer state from
7333 * @dev: the device to transfer operstate to
7334 *
7335 * Transfer operational state from root to device. This is normally
7336 * called when a stacking relationship exists between the root
7337 *	device and the device (a leaf device).
7338 */
7339void netif_stacked_transfer_operstate(const struct net_device *rootdev,
7340 struct net_device *dev)
7341{
7342 if (rootdev->operstate == IF_OPER_DORMANT)
7343 netif_dormant_on(dev);
7344 else
7345 netif_dormant_off(dev);
7346
Zhang Shengju0575c862017-04-26 17:49:38 +08007347 if (netif_carrier_ok(rootdev))
7348 netif_carrier_on(dev);
7349 else
7350 netif_carrier_off(dev);
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007351}
7352EXPORT_SYMBOL(netif_stacked_transfer_operstate);
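
/*
 * Illustrative sketch (editor's addition): stacked drivers such as
 * 802.1q VLAN call this from a NETDEV_CHANGE notifier so the upper
 * device mirrors the carrier/dormant state of its lower device; the
 * variable names here are hypothetical.
 *
 *	netif_stacked_transfer_operstate(lower_dev, upper_dev);
 */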
7353
Michael Daltona953be52014-01-16 22:23:28 -08007354#ifdef CONFIG_SYSFS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007355static int netif_alloc_rx_queues(struct net_device *dev)
7356{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007357 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00007358 struct netdev_rx_queue *rx;
Pankaj Gupta10595902015-01-12 11:41:28 +05307359 size_t sz = count * sizeof(*rx);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007360
Tom Herbertbd25fa72010-10-18 18:00:16 +00007361 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007362
Michal Hockoda6bc572017-05-08 15:57:31 -07007363 rx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
7364 if (!rx)
7365 return -ENOMEM;
7366
Tom Herbertbd25fa72010-10-18 18:00:16 +00007367 dev->_rx = rx;
7368
Tom Herbertbd25fa72010-10-18 18:00:16 +00007369 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00007370 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007371 return 0;
7372}
Tom Herbertbf264142010-11-26 08:36:09 +00007373#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007374
Changli Gaoaa942102010-12-04 02:31:41 +00007375static void netdev_init_one_queue(struct net_device *dev,
7376 struct netdev_queue *queue, void *_unused)
7377{
7378 /* Initialize queue lock */
7379 spin_lock_init(&queue->_xmit_lock);
7380 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
7381 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00007382 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00007383 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00007384#ifdef CONFIG_BQL
7385 dql_init(&queue->dql, HZ);
7386#endif
Changli Gaoaa942102010-12-04 02:31:41 +00007387}
7388
Eric Dumazet60877a32013-06-20 01:15:51 -07007389static void netif_free_tx_queues(struct net_device *dev)
7390{
WANG Cong4cb28972014-06-02 15:55:22 -07007391 kvfree(dev->_tx);
Eric Dumazet60877a32013-06-20 01:15:51 -07007392}
7393
Tom Herberte6484932010-10-18 18:04:39 +00007394static int netif_alloc_netdev_queues(struct net_device *dev)
7395{
7396 unsigned int count = dev->num_tx_queues;
7397 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07007398 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00007399
Eric Dumazetd3397272015-07-06 17:13:26 +02007400 if (count < 1 || count > 0xffff)
7401 return -EINVAL;
Tom Herberte6484932010-10-18 18:04:39 +00007402
Michal Hockoda6bc572017-05-08 15:57:31 -07007403 tx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
7404 if (!tx)
7405 return -ENOMEM;
7406
Tom Herberte6484932010-10-18 18:04:39 +00007407 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00007408
Tom Herberte6484932010-10-18 18:04:39 +00007409 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
7410 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00007411
7412 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00007413}
7414
Denys Vlasenkoa2029242015-05-11 21:17:53 +02007415void netif_tx_stop_all_queues(struct net_device *dev)
7416{
7417 unsigned int i;
7418
7419 for (i = 0; i < dev->num_tx_queues; i++) {
7420 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
tchardingf4563a72017-02-09 17:56:07 +11007421
Denys Vlasenkoa2029242015-05-11 21:17:53 +02007422 netif_tx_stop_queue(txq);
7423 }
7424}
7425EXPORT_SYMBOL(netif_tx_stop_all_queues);
7426
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007427/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007428 * register_netdevice - register a network device
7429 * @dev: device to register
7430 *
7431 * Take a completed network device structure and add it to the kernel
7432 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7433 * chain. 0 is returned on success. A negative errno code is returned
7434 * on a failure to set up the device, or if the name is a duplicate.
7435 *
7436 * Callers must hold the rtnl semaphore. You may want
7437 * register_netdev() instead of this.
7438 *
7439 * BUGS:
7440 * The locking appears insufficient to guarantee two parallel registers
7441 * will not get the same name.
7442 */
7443
7444int register_netdevice(struct net_device *dev)
7445{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007446 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007447 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007448
7449 BUG_ON(dev_boot_phase);
7450 ASSERT_RTNL();
7451
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007452 might_sleep();
7453
Linus Torvalds1da177e2005-04-16 15:20:36 -07007454 /* When net_device's are persistent, this will be fatal. */
7455 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007456 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007457
David S. Millerf1f28aa2008-07-15 00:08:33 -07007458 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07007459 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007460
Gao feng828de4f2012-09-13 20:58:27 +00007461 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00007462 if (ret < 0)
7463 goto out;
7464
Linus Torvalds1da177e2005-04-16 15:20:36 -07007465 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007466 if (dev->netdev_ops->ndo_init) {
7467 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007468 if (ret) {
7469 if (ret > 0)
7470 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08007471 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007472 }
7473 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007474
Patrick McHardyf6469682013-04-19 02:04:27 +00007475 if (((dev->hw_features | dev->features) &
7476 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00007477 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
7478 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
7479 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
7480 ret = -EINVAL;
7481 goto err_uninit;
7482 }
7483
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00007484 ret = -EBUSY;
7485 if (!dev->ifindex)
7486 dev->ifindex = dev_new_index(net);
7487 else if (__dev_get_by_index(net, dev->ifindex))
7488 goto err_uninit;
7489
Michał Mirosław5455c692011-02-15 16:59:17 +00007490 /* Transfer changeable features to wanted_features and enable
7491 * software offloads (GSO and GRO).
7492 */
7493 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00007494 dev->features |= NETIF_F_SOFT_FEATURES;
7495 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007496
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007497 if (!(dev->flags & IFF_LOOPBACK))
Michał Mirosław34324dc2011-11-15 15:29:55 +00007498 dev->hw_features |= NETIF_F_NOCACHE_COPY;
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007499
Alexander Duyck7f348a62016-04-20 16:51:00 -04007500 /* If IPv4 TCP segmentation offload is supported we should also
7501 * allow the device to enable segmenting the frame with the option
7502 * of ignoring a static IP ID value. This doesn't enable the
7503 * feature itself but allows the user to enable it later.
7504 */
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007505 if (dev->hw_features & NETIF_F_TSO)
7506 dev->hw_features |= NETIF_F_TSO_MANGLEID;
Alexander Duyck7f348a62016-04-20 16:51:00 -04007507 if (dev->vlan_features & NETIF_F_TSO)
7508 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
7509 if (dev->mpls_features & NETIF_F_TSO)
7510 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
7511 if (dev->hw_enc_features & NETIF_F_TSO)
7512 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07007513
Michał Mirosław1180e7d2011-07-14 14:41:11 -07007514 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00007515 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07007516 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00007517
Pravin B Shelaree579672013-03-07 09:28:08 +00007518 /* Make NETIF_F_SG inheritable to tunnel devices.
7519 */
Alexander Duyck802ab552016-04-10 21:45:03 -04007520 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
Pravin B Shelaree579672013-03-07 09:28:08 +00007521
Simon Horman0d89d202013-05-23 21:02:52 +00007522 /* Make NETIF_F_SG inheritable to MPLS.
7523 */
7524 dev->mpls_features |= NETIF_F_SG;
7525
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00007526 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
7527 ret = notifier_to_errno(ret);
7528 if (ret)
7529 goto err_uninit;
7530
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007531 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007532 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007533 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007534 dev->reg_state = NETREG_REGISTERED;
7535
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007536 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00007537
Linus Torvalds1da177e2005-04-16 15:20:36 -07007538 /*
7539 * Default initial state at registry is that the
7540 * device is present.
7541 */
7542
7543 set_bit(__LINK_STATE_PRESENT, &dev->state);
7544
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01007545 linkwatch_init_dev(dev);
7546
Linus Torvalds1da177e2005-04-16 15:20:36 -07007547 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007548 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007549 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04007550 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007551
Jiri Pirko948b3372013-01-08 01:38:25 +00007552 /* If the device has permanent device address, driver should
7553 * set dev_addr and also addr_assign_type should be set to
7554 * NET_ADDR_PERM (default value).
7555 */
7556 if (dev->addr_assign_type == NET_ADDR_PERM)
7557 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7558
Linus Torvalds1da177e2005-04-16 15:20:36 -07007559 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007560 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07007561 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007562 if (ret) {
7563 rollback_registered(dev);
7564 dev->reg_state = NETREG_UNREGISTERED;
7565 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007566 /*
7567 * Prevent userspace races by waiting until the network
7568 * device is fully setup before sending notifications.
7569 */
Patrick McHardya2835762010-02-26 06:34:51 +00007570 if (!dev->rtnl_link_ops ||
7571 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007572 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007573
7574out:
7575 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007576
7577err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007578 if (dev->netdev_ops->ndo_uninit)
7579 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007580 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007581}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007582EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007583
7584/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007585 * init_dummy_netdev - init a dummy network device for NAPI
7586 * @dev: device to init
7587 *
7588	 * This takes a network device structure and initializes the minimum
7589	 * number of fields so it can be used to schedule NAPI polls without
7590	 * registering a full-blown interface. This is to be used by drivers
7591 * that need to tie several hardware interfaces to a single NAPI
7592 * poll scheduler due to HW limitations.
7593 */
7594int init_dummy_netdev(struct net_device *dev)
7595{
7596 /* Clear everything. Note we don't initialize spinlocks
7597	 * as they aren't supposed to be taken by any of the
7598 * NAPI code and this dummy netdev is supposed to be
7599 * only ever used for NAPI polls
7600 */
7601 memset(dev, 0, sizeof(struct net_device));
7602
7603 /* make sure we BUG if trying to hit standard
7604 * register/unregister code path
7605 */
7606 dev->reg_state = NETREG_DUMMY;
7607
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007608 /* NAPI wants this */
7609 INIT_LIST_HEAD(&dev->napi_list);
7610
7611 /* a dummy interface is started by default */
7612 set_bit(__LINK_STATE_PRESENT, &dev->state);
7613 set_bit(__LINK_STATE_START, &dev->state);
7614
Eric Dumazet29b44332010-10-11 10:22:12 +00007615	/* Note : We don't allocate pcpu_refcnt for dummy devices,
7616	 * because users of this 'device' don't need to change
7617 * its refcount.
7618 */
7619
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007620 return 0;
7621}
7622EXPORT_SYMBOL_GPL(init_dummy_netdev);
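
/*
 * A minimal sketch (not from dev.c) of the pattern described above: a
 * driver funnels several hardware channels into one NAPI context by
 * hanging its napi_struct off a dummy netdev. All foo_* names are
 * hypothetical; error handling is omitted.
 */
struct foo_adapter {
	struct net_device napi_dev;	/* dummy netdev, never registered */
	struct napi_struct napi;
};

static void foo_napi_setup(struct foo_adapter *fa,
			   int (*poll)(struct napi_struct *, int))
{
	init_dummy_netdev(&fa->napi_dev);
	netif_napi_add(&fa->napi_dev, &fa->napi, poll, NAPI_POLL_WEIGHT);
	napi_enable(&fa->napi);
}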
7623
7624
7625/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007626 * register_netdev - register a network device
7627 * @dev: device to register
7628 *
7629 * Take a completed network device structure and add it to the kernel
7630 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7631 * chain. 0 is returned on success. A negative errno code is returned
7632 * on a failure to set up the device, or if the name is a duplicate.
7633 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07007634 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07007635 * and expands the device name if you passed a format string to
7636 * alloc_netdev.
7637 */
7638int register_netdev(struct net_device *dev)
7639{
7640 int err;
7641
7642 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007643 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007644 rtnl_unlock();
7645 return err;
7646}
7647EXPORT_SYMBOL(register_netdev);
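
/*
 * A minimal sketch (not from dev.c) of typical probe-time use of the
 * wrapper above. The foo_* names and the private struct are hypothetical.
 */
struct foo_priv {
	int placeholder;
};

static int foo_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct foo_priv));
	if (!dev)
		return -ENOMEM;

	/* ... set dev->netdev_ops, features and the MAC address here ... */

	err = register_netdev(dev);	/* takes and drops the rtnl lock */
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}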
7648
Eric Dumazet29b44332010-10-11 10:22:12 +00007649int netdev_refcnt_read(const struct net_device *dev)
7650{
7651 int i, refcnt = 0;
7652
7653 for_each_possible_cpu(i)
7654 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
7655 return refcnt;
7656}
7657EXPORT_SYMBOL(netdev_refcnt_read);
7658
Ben Hutchings2c530402012-07-10 10:55:09 +00007659/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007660 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00007661 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07007662 *
7663 * This is called when unregistering network devices.
7664 *
7665 * Any protocol or device that holds a reference should register
7666 * for netdevice notification, and cleanup and put back the
7667 * reference if they receive an UNREGISTER event.
7668 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007669 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007670 */
7671static void netdev_wait_allrefs(struct net_device *dev)
7672{
7673 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00007674 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007675
Eric Dumazete014deb2009-11-17 05:59:21 +00007676 linkwatch_forget_dev(dev);
7677
Linus Torvalds1da177e2005-04-16 15:20:36 -07007678 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00007679 refcnt = netdev_refcnt_read(dev);
7680
7681 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007682 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08007683 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007684
7685 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007686 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007687
Eric Dumazet748e2d92012-08-22 21:50:59 +00007688 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007689 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00007690 rtnl_lock();
7691
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007692 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007693 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
7694 &dev->state)) {
7695 /* We must not have linkwatch events
7696 * pending on unregister. If this
7697 * happens, we simply run the queue
7698 * unscheduled, resulting in a noop
7699 * for this device.
7700 */
7701 linkwatch_run_queue();
7702 }
7703
Stephen Hemminger6756ae42006-03-20 22:23:58 -08007704 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007705
7706 rebroadcast_time = jiffies;
7707 }
7708
7709 msleep(250);
7710
Eric Dumazet29b44332010-10-11 10:22:12 +00007711 refcnt = netdev_refcnt_read(dev);
7712
Linus Torvalds1da177e2005-04-16 15:20:36 -07007713 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007714 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
7715 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007716 warning_time = jiffies;
7717 }
7718 }
7719}
7720
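/*
 * A minimal sketch (not from dev.c) of the contract netdev_wait_allrefs()
 * depends on: any subsystem holding a reference registers a notifier and
 * drops the reference on UNREGISTER. foo_forget() is a hypothetical
 * helper that ends up calling dev_put().
 */
static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UNREGISTER)
		foo_forget(dev);	/* hypothetical; releases held reference */

	return NOTIFY_DONE;
}
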
7721/* The sequence is:
7722 *
7723 * rtnl_lock();
7724 * ...
7725 * register_netdevice(x1);
7726 * register_netdevice(x2);
7727 * ...
7728 * unregister_netdevice(y1);
7729 * unregister_netdevice(y2);
7730 * ...
7731 * rtnl_unlock();
7732 * free_netdev(y1);
7733 * free_netdev(y2);
7734 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07007735 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07007736 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007737 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07007738 * without deadlocking with linkwatch via keventd.
7739 * 2) Since we run with the RTNL semaphore not held, we can sleep
7740 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07007741 *
7742 * We must not return until all unregister events added during
7743 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007744 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007745void netdev_run_todo(void)
7746{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007747 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007748
Linus Torvalds1da177e2005-04-16 15:20:36 -07007749 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007750 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07007751
7752 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007753
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007754
7755 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00007756 if (!list_empty(&list))
7757 rcu_barrier();
7758
Linus Torvalds1da177e2005-04-16 15:20:36 -07007759 while (!list_empty(&list)) {
7760 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00007761 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007762 list_del(&dev->todo_list);
7763
Eric Dumazet748e2d92012-08-22 21:50:59 +00007764 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007765 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00007766 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007767
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007768 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007769 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007770 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007771 dump_stack();
7772 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007773 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007774
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007775 dev->reg_state = NETREG_UNREGISTERED;
7776
7777 netdev_wait_allrefs(dev);
7778
7779 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00007780 BUG_ON(netdev_refcnt_read(dev));
Salam Noureddine7866a622015-01-27 11:35:48 -08007781 BUG_ON(!list_empty(&dev->ptype_all));
7782 BUG_ON(!list_empty(&dev->ptype_specific));
Eric Dumazet33d480c2011-08-11 19:30:52 +00007783 WARN_ON(rcu_access_pointer(dev->ip_ptr));
7784 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07007785 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007786
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007787 if (dev->destructor)
7788 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007789
Eric W. Biederman50624c92013-09-23 21:19:49 -07007790 /* Report a network device has been unregistered */
7791 rtnl_lock();
7792 dev_net(dev)->dev_unreg_count--;
7793 __rtnl_unlock();
7794 wake_up(&netdev_unregistering_wq);
7795
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007796 /* Free network device */
7797 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007798 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007799}
7800
Jarod Wilson92566452016-02-01 18:51:04 -05007801/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
7802 * all the same fields in the same order as net_device_stats, with only
7803 * the type differing, but rtnl_link_stats64 may have additional fields
7804 * at the end for newer counters.
Ben Hutchings3cfde792010-07-09 09:11:52 +00007805 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007806void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7807 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00007808{
7809#if BITS_PER_LONG == 64
Jarod Wilson92566452016-02-01 18:51:04 -05007810 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007811 memcpy(stats64, netdev_stats, sizeof(*stats64));
Jarod Wilson92566452016-02-01 18:51:04 -05007812 /* zero out counters that only exist in rtnl_link_stats64 */
7813 memset((char *)stats64 + sizeof(*netdev_stats), 0,
7814 sizeof(*stats64) - sizeof(*netdev_stats));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007815#else
Jarod Wilson92566452016-02-01 18:51:04 -05007816 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
Ben Hutchings3cfde792010-07-09 09:11:52 +00007817 const unsigned long *src = (const unsigned long *)netdev_stats;
7818 u64 *dst = (u64 *)stats64;
7819
Jarod Wilson92566452016-02-01 18:51:04 -05007820 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007821 for (i = 0; i < n; i++)
7822 dst[i] = src[i];
Jarod Wilson92566452016-02-01 18:51:04 -05007823 /* zero out counters that only exist in rtnl_link_stats64 */
7824 memset((char *)stats64 + n * sizeof(u64), 0,
7825 sizeof(*stats64) - n * sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007826#endif
7827}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007828EXPORT_SYMBOL(netdev_stats_to_stats64);
Ben Hutchings3cfde792010-07-09 09:11:52 +00007829
Eric Dumazetd83345a2009-11-16 03:36:51 +00007830/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007831 * dev_get_stats - get network device statistics
7832 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07007833 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007834 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00007835 * Get network statistics from device. Return @storage.
7836 * The device driver may provide its own method by setting
7837 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
7838 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007839 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00007840struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7841 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00007842{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007843 const struct net_device_ops *ops = dev->netdev_ops;
7844
Eric Dumazet28172732010-07-07 14:58:56 -07007845 if (ops->ndo_get_stats64) {
7846 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007847 ops->ndo_get_stats64(dev, storage);
7848 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00007849 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007850 } else {
7851 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07007852 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007853 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet015f0682014-03-27 08:45:56 -07007854 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
Jarod Wilson6e7333d2016-02-01 18:51:05 -05007855 storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
Eric Dumazet28172732010-07-07 14:58:56 -07007856 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07007857}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007858EXPORT_SYMBOL(dev_get_stats);
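
/*
 * A minimal sketch (not from dev.c) of the driver side of the dispatch
 * above: a hypothetical ndo_get_stats64 implementation. In this kernel
 * the callback fills @storage and returns void; dev_get_stats() then
 * folds in the core rx_dropped/tx_dropped/rx_nohandler counters.
 */
struct foo_stats_priv {
	u64 rx_packets;
	u64 tx_packets;
};

static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct foo_stats_priv *fp = netdev_priv(dev);

	storage->rx_packets = fp->rx_packets;
	storage->tx_packets = fp->tx_packets;
}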
Rusty Russellc45d2862007-03-28 14:29:08 -07007859
Eric Dumazet24824a02010-10-02 06:11:55 +00007860struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07007861{
Eric Dumazet24824a02010-10-02 06:11:55 +00007862 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07007863
Eric Dumazet24824a02010-10-02 06:11:55 +00007864#ifdef CONFIG_NET_CLS_ACT
7865 if (queue)
7866 return queue;
7867 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7868 if (!queue)
7869 return NULL;
7870 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08007871 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
Eric Dumazet24824a02010-10-02 06:11:55 +00007872 queue->qdisc_sleeping = &noop_qdisc;
7873 rcu_assign_pointer(dev->ingress_queue, queue);
7874#endif
7875 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07007876}
7877
Eric Dumazet2c60db02012-09-16 09:17:26 +00007878static const struct ethtool_ops default_ethtool_ops;
7879
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00007880void netdev_set_default_ethtool_ops(struct net_device *dev,
7881 const struct ethtool_ops *ops)
7882{
7883 if (dev->ethtool_ops == &default_ethtool_ops)
7884 dev->ethtool_ops = ops;
7885}
7886EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
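
/*
 * A minimal sketch (not from dev.c) of a bus or MAC layer installing
 * fallback ethtool ops without clobbering a driver's own choice, which
 * is the case the helper above guards against. foo_* names are
 * hypothetical.
 */
static const struct ethtool_ops foo_fallback_ethtool_ops = {
	.get_link = ethtool_op_get_link,
};

static void foo_attach(struct net_device *dev)
{
	netdev_set_default_ethtool_ops(dev, &foo_fallback_ethtool_ops);
}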
7887
Eric Dumazet74d332c2013-10-30 13:10:44 -07007888void netdev_freemem(struct net_device *dev)
7889{
7890 char *addr = (char *)dev - dev->padded;
7891
WANG Cong4cb28972014-06-02 15:55:22 -07007892 kvfree(addr);
Eric Dumazet74d332c2013-10-30 13:10:44 -07007893}
7894
Linus Torvalds1da177e2005-04-16 15:20:36 -07007895/**
tcharding722c9a02017-02-09 17:56:04 +11007896 * alloc_netdev_mqs - allocate network device
7897 * @sizeof_priv: size of private data to allocate space for
7898 * @name: device name format string
7899 * @name_assign_type: origin of device name
7900 * @setup: callback to initialize device
7901 * @txqs: the number of TX subqueues to allocate
7902 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07007903 *
tcharding722c9a02017-02-09 17:56:04 +11007904 * Allocates a struct net_device with private data area for driver use
7905 * and performs basic initialization. Also allocates subqueue structs
7906 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007907 */
Tom Herbert36909ea2011-01-09 19:36:31 +00007908struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02007909 unsigned char name_assign_type,
Tom Herbert36909ea2011-01-09 19:36:31 +00007910 void (*setup)(struct net_device *),
7911 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007912{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007913 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07007914 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007915 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007916
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07007917 BUG_ON(strlen(name) >= sizeof(dev->name));
7918
Tom Herbert36909ea2011-01-09 19:36:31 +00007919 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007920 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00007921 return NULL;
7922 }
7923
Michael Daltona953be52014-01-16 22:23:28 -08007924#ifdef CONFIG_SYSFS
Tom Herbert36909ea2011-01-09 19:36:31 +00007925 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007926 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00007927 return NULL;
7928 }
7929#endif
7930
David S. Millerfd2ea0a2008-07-17 01:56:23 -07007931 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07007932 if (sizeof_priv) {
7933 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007934 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07007935 alloc_size += sizeof_priv;
7936 }
7937 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007938 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007939
Michal Hockoda6bc572017-05-08 15:57:31 -07007940 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_REPEAT);
Joe Perches62b59422013-02-04 16:48:16 +00007941 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007942 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007943
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007944 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007945 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007946
Eric Dumazet29b44332010-10-11 10:22:12 +00007947 dev->pcpu_refcnt = alloc_percpu(int);
7948 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07007949 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007950
Linus Torvalds1da177e2005-04-16 15:20:36 -07007951 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00007952 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007953
Jiri Pirko22bedad32010-04-01 21:22:57 +00007954 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007955 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00007956
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09007957 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007958
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07007959 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00007960 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007961
Herbert Xud565b0a2008-12-15 23:38:52 -08007962 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00007963 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07007964 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00007965 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007966 INIT_LIST_HEAD(&dev->adj_list.upper);
7967 INIT_LIST_HEAD(&dev->adj_list.lower);
Salam Noureddine7866a622015-01-27 11:35:48 -08007968 INIT_LIST_HEAD(&dev->ptype_all);
7969 INIT_LIST_HEAD(&dev->ptype_specific);
Jiri Kosina59cc1f62016-08-10 11:05:15 +02007970#ifdef CONFIG_NET_SCHED
7971 hash_init(dev->qdisc_hash);
7972#endif
Eric Dumazet02875872014-10-05 18:38:35 -07007973 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007974 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007975
Phil Suttera8131042016-02-17 15:37:43 +01007976 if (!dev->tx_queue_len) {
Phil Sutterf84bb1e2015-08-27 21:21:36 +02007977 dev->priv_flags |= IFF_NO_QUEUE;
Jesper Dangaard Brouer11597082016-11-03 14:56:06 +01007978 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
Phil Suttera8131042016-02-17 15:37:43 +01007979 }
Phil Sutter906470c2015-08-18 10:30:48 +02007980
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007981 dev->num_tx_queues = txqs;
7982 dev->real_num_tx_queues = txqs;
7983 if (netif_alloc_netdev_queues(dev))
7984 goto free_all;
7985
Michael Daltona953be52014-01-16 22:23:28 -08007986#ifdef CONFIG_SYSFS
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007987 dev->num_rx_queues = rxqs;
7988 dev->real_num_rx_queues = rxqs;
7989 if (netif_alloc_rx_queues(dev))
7990 goto free_all;
7991#endif
7992
Linus Torvalds1da177e2005-04-16 15:20:36 -07007993 strcpy(dev->name, name);
Tom Gundersenc835a672014-07-14 16:37:24 +02007994 dev->name_assign_type = name_assign_type;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00007995 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00007996 if (!dev->ethtool_ops)
7997 dev->ethtool_ops = &default_ethtool_ops;
Pablo Neirae687ad62015-05-13 18:19:38 +02007998
7999 nf_hook_ingress_init(dev);
8000
Linus Torvalds1da177e2005-04-16 15:20:36 -07008001 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00008002
David S. Miller8d3bdbd2011-02-08 15:02:50 -08008003free_all:
8004 free_netdev(dev);
8005 return NULL;
8006
Eric Dumazet29b44332010-10-11 10:22:12 +00008007free_pcpu:
8008 free_percpu(dev->pcpu_refcnt);
Eric Dumazet74d332c2013-10-30 13:10:44 -07008009free_dev:
8010 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00008011 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008012}
Tom Herbert36909ea2011-01-09 19:36:31 +00008013EXPORT_SYMBOL(alloc_netdev_mqs);
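
/*
 * A minimal sketch (not from dev.c) of direct alloc_netdev_mqs() use for
 * a multiqueue device; most drivers reach it through wrappers such as
 * alloc_etherdev_mq(). foo_* names are hypothetical.
 */
struct foo_mq_priv {
	int placeholder;
};

static void foo_mq_setup(struct net_device *dev)
{
	ether_setup(dev);	/* Ethernet defaults before driver overrides */
}

static struct net_device *foo_mq_alloc(unsigned int nqueues)
{
	return alloc_netdev_mqs(sizeof(struct foo_mq_priv), "foo%d",
				NET_NAME_UNKNOWN, foo_mq_setup,
				nqueues, nqueues);
}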
Linus Torvalds1da177e2005-04-16 15:20:36 -07008014
8015/**
tcharding722c9a02017-02-09 17:56:04 +11008016 * free_netdev - free network device
8017 * @dev: device
Linus Torvalds1da177e2005-04-16 15:20:36 -07008018 *
tcharding722c9a02017-02-09 17:56:04 +11008019 * This function does the last stage of destroying an allocated device
8020	 * interface. The reference to the device object is released. If this
8021	 * is the last reference then it will be freed. Must be called in
8022	 * process context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008023 */
8024void free_netdev(struct net_device *dev)
8025{
Herbert Xud565b0a2008-12-15 23:38:52 -08008026 struct napi_struct *p, *n;
David S. Millerb5cdae32017-04-18 15:36:58 -04008027 struct bpf_prog *prog;
Herbert Xud565b0a2008-12-15 23:38:52 -08008028
Eric Dumazet93d05d42015-11-18 06:31:03 -08008029 might_sleep();
Eric Dumazet60877a32013-06-20 01:15:51 -07008030 netif_free_tx_queues(dev);
Michael Daltona953be52014-01-16 22:23:28 -08008031#ifdef CONFIG_SYSFS
Pankaj Gupta10595902015-01-12 11:41:28 +05308032 kvfree(dev->_rx);
Tom Herbertfe822242010-11-09 10:47:38 +00008033#endif
David S. Millere8a04642008-07-17 00:34:19 -07008034
Eric Dumazet33d480c2011-08-11 19:30:52 +00008035 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00008036
Jiri Pirkof001fde2009-05-05 02:48:28 +00008037 /* Flush device addresses */
8038 dev_addr_flush(dev);
8039
Herbert Xud565b0a2008-12-15 23:38:52 -08008040 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
8041 netif_napi_del(p);
8042
Eric Dumazet29b44332010-10-11 10:22:12 +00008043 free_percpu(dev->pcpu_refcnt);
8044 dev->pcpu_refcnt = NULL;
8045
David S. Millerb5cdae32017-04-18 15:36:58 -04008046 prog = rcu_dereference_protected(dev->xdp_prog, 1);
8047 if (prog) {
8048 bpf_prog_put(prog);
8049 static_key_slow_dec(&generic_xdp_needed);
8050 }
8051
Stephen Hemminger3041a062006-05-26 13:25:24 -07008052 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008053 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07008054 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008055 return;
8056 }
8057
8058 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
8059 dev->reg_state = NETREG_RELEASED;
8060
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07008061 /* will free via device release */
8062 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008063}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008064EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09008065
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008066/**
8067 * synchronize_net - Synchronize with packet receive processing
8068 *
8069 * Wait for packets currently being received to be done.
8070 * Does not block later packets from starting.
8071 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09008072void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008073{
8074 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00008075 if (rtnl_is_locked())
8076 synchronize_rcu_expedited();
8077 else
8078 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008079}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008080EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008081
8082/**
Eric Dumazet44a08732009-10-27 07:03:04 +00008083 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07008084 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00008085 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08008086 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008087 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08008088 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00008089 *	If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008090 *
8091 * Callers must hold the rtnl semaphore. You may want
8092 * unregister_netdev() instead of this.
8093 */
8094
Eric Dumazet44a08732009-10-27 07:03:04 +00008095void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008096{
Herbert Xua6620712007-12-12 19:21:56 -08008097 ASSERT_RTNL();
8098
Eric Dumazet44a08732009-10-27 07:03:04 +00008099 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00008100 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00008101 } else {
8102 rollback_registered(dev);
8103 /* Finish processing unregister after unlock */
8104 net_set_todo(dev);
8105 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008106}
Eric Dumazet44a08732009-10-27 07:03:04 +00008107EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008108
8109/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008110 * unregister_netdevice_many - unregister many devices
8111 * @head: list of devices
Eric Dumazet87757a92014-06-06 06:44:03 -07008112 *
8113 * Note: As most callers use a stack allocated list_head,
8114 *	we force a list_del() to make sure the stack won't be corrupted later.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008115 */
8116void unregister_netdevice_many(struct list_head *head)
8117{
8118 struct net_device *dev;
8119
8120 if (!list_empty(head)) {
8121 rollback_registered_many(head);
8122 list_for_each_entry(dev, head, unreg_list)
8123 net_set_todo(dev);
Eric Dumazet87757a92014-06-06 06:44:03 -07008124 list_del(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008125 }
8126}
Eric Dumazet63c80992009-10-27 07:06:49 +00008127EXPORT_SYMBOL(unregister_netdevice_many);
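
/*
 * A minimal sketch (not from dev.c) of batching several teardowns under
 * one rtnl hold with a stack-allocated list head, the case the note
 * above is about. foo_destroy_all() is a hypothetical helper.
 */
static void foo_destroy_all(struct net_device **devs, int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);	/* also list_del()s the head */
	rtnl_unlock();
}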
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008128
8129/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008130 * unregister_netdev - remove device from the kernel
8131 * @dev: device
8132 *
8133 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08008134 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008135 *
8136 * This is just a wrapper for unregister_netdevice that takes
8137 * the rtnl semaphore. In general you want to use this and not
8138 * unregister_netdevice.
8139 */
8140void unregister_netdev(struct net_device *dev)
8141{
8142 rtnl_lock();
8143 unregister_netdevice(dev);
8144 rtnl_unlock();
8145}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008146EXPORT_SYMBOL(unregister_netdev);
8147
Eric W. Biedermance286d32007-09-12 13:53:49 +02008148/**
8149 *	dev_change_net_namespace - move device to a different network namespace
8150 * @dev: device
8151 * @net: network namespace
8152 * @pat: If not NULL name pattern to try if the current device name
8153 * is already taken in the destination network namespace.
8154 *
8155 * This function shuts down a device interface and moves it
8156 * to a new network namespace. On success 0 is returned, on
8157 *	a failure a negative errno code is returned.
8158 *
8159 * Callers must hold the rtnl semaphore.
8160 */
8161
8162int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
8163{
Eric W. Biedermance286d32007-09-12 13:53:49 +02008164 int err;
8165
8166 ASSERT_RTNL();
8167
8168 /* Don't allow namespace local devices to be moved. */
8169 err = -EINVAL;
8170 if (dev->features & NETIF_F_NETNS_LOCAL)
8171 goto out;
8172
8173	/* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02008174 if (dev->reg_state != NETREG_REGISTERED)
8175 goto out;
8176
8177	/* Get out if there is nothing to do */
8178 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09008179 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02008180 goto out;
8181
8182 /* Pick the destination device name, and ensure
8183 * we can use it in the destination network namespace.
8184 */
8185 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00008186 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008187 /* We get here if we can't use the current device name */
8188 if (!pat)
8189 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00008190 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02008191 goto out;
8192 }
8193
8194 /*
8195 * And now a mini version of register_netdevice unregister_netdevice.
8196 */
8197
8198 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07008199 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008200
8201 /* And unlink it from device chain */
8202 err = -ENODEV;
8203 unlist_netdevice(dev);
8204
8205 synchronize_net();
8206
8207 /* Shutdown queueing discipline. */
8208 dev_shutdown(dev);
8209
8210	/* Notify protocols that we are about to destroy
tchardingeb13da12017-02-09 17:56:06 +11008211	 * this device. They should clean up all their state.
8212 *
8213 * Note that dev->reg_state stays at NETREG_REGISTERED.
8214 * This is wanted because this way 8021q and macvlan know
8215 * the device is just moving and can keep their slaves up.
8216 */
Eric W. Biedermance286d32007-09-12 13:53:49 +02008217 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00008218 rcu_barrier();
8219 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07008220 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008221
8222 /*
8223 * Flush the unicast and multicast chains
8224 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00008225 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00008226 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008227
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008228 /* Send a netdev-removed uevent to the old namespace */
8229 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04008230 netdev_adjacent_del_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008231
Eric W. Biedermance286d32007-09-12 13:53:49 +02008232 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09008233 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008234
Eric W. Biedermance286d32007-09-12 13:53:49 +02008235 /* If there is an ifindex conflict assign a new one */
Nicolas Dichtel7a66bbc2015-04-02 17:07:09 +02008236 if (__dev_get_by_index(net, dev->ifindex))
Eric W. Biedermance286d32007-09-12 13:53:49 +02008237 dev->ifindex = dev_new_index(net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008238
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008239 /* Send a netdev-add uevent to the new namespace */
8240 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04008241 netdev_adjacent_add_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008242
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008243 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07008244 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008245 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008246
8247 /* Add the device back in the hashes */
8248 list_netdevice(dev);
8249
8250	/* Notify protocols that a new device appeared. */
8251 call_netdevice_notifiers(NETDEV_REGISTER, dev);
8252
Eric W. Biedermand90a9092009-12-12 22:11:15 +00008253 /*
8254 * Prevent userspace races by waiting until the network
8255 * device is fully setup before sending notifications.
8256 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07008257 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00008258
Eric W. Biedermance286d32007-09-12 13:53:49 +02008259 synchronize_net();
8260 err = 0;
8261out:
8262 return err;
8263}
Johannes Berg463d0182009-07-14 00:33:35 +02008264EXPORT_SYMBOL_GPL(dev_change_net_namespace);
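
/*
 * A minimal sketch (not from dev.c) of caller-side use of the move
 * operation above, holding rtnl as required and passing a rename
 * pattern for name collisions. foo_move_to_ns() is hypothetical.
 */
static int foo_move_to_ns(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();
	return err;
}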
Eric W. Biedermance286d32007-09-12 13:53:49 +02008265
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008266static int dev_cpu_dead(unsigned int oldcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008267{
8268 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008269 struct sk_buff *skb;
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008270 unsigned int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008271 struct softnet_data *sd, *oldsd;
8272
Linus Torvalds1da177e2005-04-16 15:20:36 -07008273 local_irq_disable();
8274 cpu = smp_processor_id();
8275 sd = &per_cpu(softnet_data, cpu);
8276 oldsd = &per_cpu(softnet_data, oldcpu);
8277
8278 /* Find end of our completion_queue. */
8279 list_skb = &sd->completion_queue;
8280 while (*list_skb)
8281 list_skb = &(*list_skb)->next;
8282 /* Append completion queue from offline CPU. */
8283 *list_skb = oldsd->completion_queue;
8284 oldsd->completion_queue = NULL;
8285
Linus Torvalds1da177e2005-04-16 15:20:36 -07008286 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00008287 if (oldsd->output_queue) {
8288 *sd->output_queue_tailp = oldsd->output_queue;
8289 sd->output_queue_tailp = oldsd->output_queue_tailp;
8290 oldsd->output_queue = NULL;
8291 oldsd->output_queue_tailp = &oldsd->output_queue;
8292 }
Eric Dumazetac64da02015-01-15 17:04:22 -08008293 /* Append NAPI poll list from offline CPU, with one exception :
8294 * process_backlog() must be called by cpu owning percpu backlog.
8295 * We properly handle process_queue & input_pkt_queue later.
8296 */
8297 while (!list_empty(&oldsd->poll_list)) {
8298 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
8299 struct napi_struct,
8300 poll_list);
8301
8302 list_del_init(&napi->poll_list);
8303 if (napi->poll == process_backlog)
8304 napi->state = 0;
8305 else
8306 ____napi_schedule(sd, napi);
Heiko Carstens264524d2011-06-06 20:50:03 +00008307 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008308
8309 raise_softirq_irqoff(NET_TX_SOFTIRQ);
8310 local_irq_enable();
8311
8312 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00008313 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08008314 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00008315 input_queue_head_incr(oldsd);
8316 }
Eric Dumazetac64da02015-01-15 17:04:22 -08008317 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08008318 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00008319 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07008320 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008321
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008322 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008323}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008324
Herbert Xu7f353bf2007-08-10 15:47:58 -07008325/**
Herbert Xub63365a2008-10-23 01:11:29 -07008326 * netdev_increment_features - increment feature set by one
8327 * @all: current feature set
8328 * @one: new feature set
8329 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07008330 *
8331 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07008332 * @one to the master device with current feature set @all. Will not
8333 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07008334 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00008335netdev_features_t netdev_increment_features(netdev_features_t all,
8336 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07008337{
Tom Herbertc8cd0982015-12-14 11:19:44 -08008338 if (mask & NETIF_F_HW_CSUM)
Tom Herberta1882222015-12-14 11:19:43 -08008339 mask |= NETIF_F_CSUM_MASK;
Michał Mirosław1742f182011-04-22 06:31:16 +00008340 mask |= NETIF_F_VLAN_CHALLENGED;
8341
Tom Herberta1882222015-12-14 11:19:43 -08008342 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
Michał Mirosław1742f182011-04-22 06:31:16 +00008343 all &= one | ~NETIF_F_ALL_FOR_ALL;
8344
Michał Mirosław1742f182011-04-22 06:31:16 +00008345 /* If one device supports hw checksumming, set for all. */
Tom Herbertc8cd0982015-12-14 11:19:44 -08008346 if (all & NETIF_F_HW_CSUM)
8347 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07008348
8349 return all;
8350}
Herbert Xub63365a2008-10-23 01:11:29 -07008351EXPORT_SYMBOL(netdev_increment_features);
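
/*
 * A rough sketch (not from dev.c) of how an aggregating driver might
 * fold slave feature sets together with the helper above, starting from
 * an all-enabled set; real bonding code differs in detail. All foo_*
 * names are hypothetical.
 */
struct foo_slave {
	struct list_head list;
	struct net_device *dev;
};

struct foo_master {
	struct net_device *dev;
	struct list_head slaves;
};

static netdev_features_t foo_compute_features(struct foo_master *master)
{
	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
	struct foo_slave *slave;

	list_for_each_entry(slave, &master->slaves, list)
		features = netdev_increment_features(features,
						     slave->dev->features,
						     master->dev->hw_features);
	return features;
}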
Herbert Xu7f353bf2007-08-10 15:47:58 -07008352
Baruch Siach430f03c2013-06-02 20:43:55 +00008353static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008354{
8355 int i;
8356 struct hlist_head *hash;
8357
8358 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
8359 if (hash != NULL)
8360 for (i = 0; i < NETDEV_HASHENTRIES; i++)
8361 INIT_HLIST_HEAD(&hash[i]);
8362
8363 return hash;
8364}
8365
Eric W. Biederman881d9662007-09-17 11:56:21 -07008366/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07008367static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07008368{
Rustad, Mark D734b6542012-07-18 09:06:07 +00008369 if (net != &init_net)
8370 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07008371
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008372 net->dev_name_head = netdev_create_hash();
8373 if (net->dev_name_head == NULL)
8374 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008375
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008376 net->dev_index_head = netdev_create_hash();
8377 if (net->dev_index_head == NULL)
8378 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008379
8380 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008381
8382err_idx:
8383 kfree(net->dev_name_head);
8384err_name:
8385 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008386}
8387
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008388/**
8389 * netdev_drivername - network driver for the device
8390 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008391 *
8392 * Determine network driver for device.
8393 */
David S. Miller3019de12011-06-06 16:41:33 -07008394const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07008395{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07008396 const struct device_driver *driver;
8397 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07008398 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07008399
8400 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008401 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07008402 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008403
8404 driver = parent->driver;
8405 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07008406 return driver->name;
8407 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008408}
8409
Joe Perches6ea754e2014-09-22 11:10:50 -07008410static void __netdev_printk(const char *level, const struct net_device *dev,
8411 struct va_format *vaf)
Joe Perches256df2f2010-06-27 01:02:35 +00008412{
Joe Perchesb004ff42012-09-12 20:12:19 -07008413 if (dev && dev->dev.parent) {
Joe Perches6ea754e2014-09-22 11:10:50 -07008414 dev_printk_emit(level[1] - '0',
8415 dev->dev.parent,
8416 "%s %s %s%s: %pV",
8417 dev_driver_string(dev->dev.parent),
8418 dev_name(dev->dev.parent),
8419 netdev_name(dev), netdev_reg_state(dev),
8420 vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008421 } else if (dev) {
Joe Perches6ea754e2014-09-22 11:10:50 -07008422 printk("%s%s%s: %pV",
8423 level, netdev_name(dev), netdev_reg_state(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008424 } else {
Joe Perches6ea754e2014-09-22 11:10:50 -07008425 printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008426 }
Joe Perches256df2f2010-06-27 01:02:35 +00008427}
8428
Joe Perches6ea754e2014-09-22 11:10:50 -07008429void netdev_printk(const char *level, const struct net_device *dev,
8430 const char *format, ...)
Joe Perches256df2f2010-06-27 01:02:35 +00008431{
8432 struct va_format vaf;
8433 va_list args;
Joe Perches256df2f2010-06-27 01:02:35 +00008434
8435 va_start(args, format);
8436
8437 vaf.fmt = format;
8438 vaf.va = &args;
8439
Joe Perches6ea754e2014-09-22 11:10:50 -07008440 __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008441
Joe Perches256df2f2010-06-27 01:02:35 +00008442 va_end(args);
Joe Perches256df2f2010-06-27 01:02:35 +00008443}
8444EXPORT_SYMBOL(netdev_printk);
8445
8446#define define_netdev_printk_level(func, level) \
Joe Perches6ea754e2014-09-22 11:10:50 -07008447void func(const struct net_device *dev, const char *fmt, ...) \
Joe Perches256df2f2010-06-27 01:02:35 +00008448{ \
Joe Perches256df2f2010-06-27 01:02:35 +00008449 struct va_format vaf; \
8450 va_list args; \
8451 \
8452 va_start(args, fmt); \
8453 \
8454 vaf.fmt = fmt; \
8455 vaf.va = &args; \
8456 \
Joe Perches6ea754e2014-09-22 11:10:50 -07008457 __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07008458 \
Joe Perches256df2f2010-06-27 01:02:35 +00008459 va_end(args); \
Joe Perches256df2f2010-06-27 01:02:35 +00008460} \
8461EXPORT_SYMBOL(func);
8462
8463define_netdev_printk_level(netdev_emerg, KERN_EMERG);
8464define_netdev_printk_level(netdev_alert, KERN_ALERT);
8465define_netdev_printk_level(netdev_crit, KERN_CRIT);
8466define_netdev_printk_level(netdev_err, KERN_ERR);
8467define_netdev_printk_level(netdev_warn, KERN_WARNING);
8468define_netdev_printk_level(netdev_notice, KERN_NOTICE);
8469define_netdev_printk_level(netdev_info, KERN_INFO);
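
/*
 * A minimal sketch (not from dev.c) of a typical call site for the level
 * helpers generated above. foo_link_change() is hypothetical.
 */
static void foo_link_change(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link up\n");
	else
		netdev_warn(dev, "link down\n");
}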
8470
Pavel Emelyanov46650792007-10-08 20:38:39 -07008471static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07008472{
8473 kfree(net->dev_name_head);
8474 kfree(net->dev_index_head);
8475}
8476
Denis V. Lunev022cbae2007-11-13 03:23:50 -08008477static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07008478 .init = netdev_init,
8479 .exit = netdev_exit,
8480};
8481
Pavel Emelyanov46650792007-10-08 20:38:39 -07008482static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02008483{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008484 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02008485 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008486 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02008487 * initial network namespace
8488 */
8489 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008490 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008491 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008492 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02008493
8494 /* Ignore unmoveable devices (i.e. loopback) */
8495 if (dev->features & NETIF_F_NETNS_LOCAL)
8496 continue;
8497
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008498 /* Leave virtual devices for the generic cleanup */
8499 if (dev->rtnl_link_ops)
8500 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08008501
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008502 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008503 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
8504 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008505 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008506 pr_emerg("%s: failed to move %s to init_net: %d\n",
8507 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008508 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02008509 }
8510 }
8511 rtnl_unlock();
8512}
8513
Eric W. Biederman50624c92013-09-23 21:19:49 -07008514static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
8515{
8516 /* Return with the rtnl_lock held when there are no network
8517 * devices unregistering in any network namespace in net_list.
8518 */
8519 struct net *net;
8520 bool unregistering;
Peter Zijlstraff960a72014-10-29 17:04:56 +01008521 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008522
Peter Zijlstraff960a72014-10-29 17:04:56 +01008523 add_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008524 for (;;) {
Eric W. Biederman50624c92013-09-23 21:19:49 -07008525 unregistering = false;
8526 rtnl_lock();
8527 list_for_each_entry(net, net_list, exit_list) {
8528 if (net->dev_unreg_count > 0) {
8529 unregistering = true;
8530 break;
8531 }
8532 }
8533 if (!unregistering)
8534 break;
8535 __rtnl_unlock();
Peter Zijlstraff960a72014-10-29 17:04:56 +01008536
8537 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008538 }
Peter Zijlstraff960a72014-10-29 17:04:56 +01008539 remove_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008540}
8541
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008542static void __net_exit default_device_exit_batch(struct list_head *net_list)
8543{
8544 /* At exit all network devices most be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04008545 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008546 * Do this across as many network namespaces as possible to
8547 * improve batching efficiency.
8548 */
8549 struct net_device *dev;
8550 struct net *net;
8551 LIST_HEAD(dev_kill_list);
8552
Eric W. Biederman50624c92013-09-23 21:19:49 -07008553 /* To prevent network device cleanup code from dereferencing
8554	 * loopback devices or network devices that have been freed,
8555	 * wait here for all pending unregistrations to complete
8556	 * before unregistering the loopback device and allowing the
8557	 * network namespace to be freed.
8558 *
8559 * The netdev todo list containing all network devices
8560 * unregistrations that happen in default_device_exit_batch
8561 * will run in the rtnl_unlock() at the end of
8562 * default_device_exit_batch.
8563 */
8564 rtnl_lock_unregistering(net_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008565 list_for_each_entry(net, net_list, exit_list) {
8566 for_each_netdev_reverse(net, dev) {
Jiri Pirkob0ab2fa2014-06-26 09:58:25 +02008567 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008568 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
8569 else
8570 unregister_netdevice_queue(dev, &dev_kill_list);
8571 }
8572 }
8573 unregister_netdevice_many(&dev_kill_list);
8574 rtnl_unlock();
8575}
8576
Denis V. Lunev022cbae2007-11-13 03:23:50 -08008577static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008578 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008579 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02008580};
8581
Linus Torvalds1da177e2005-04-16 15:20:36 -07008582/*
8583 * Initialize the DEV module. At boot time this walks the device list and
8584 * unhooks any devices that fail to initialise (normally hardware not
8585 * present) and leaves us with a valid list of present and active devices.
8586 *
8587 */
8588
8589/*
8590 * This is called single threaded during boot, so no need
8591 * to take the rtnl semaphore.
8592 */
8593static int __init net_dev_init(void)
8594{
8595 int i, rc = -ENOMEM;
8596
8597 BUG_ON(!dev_boot_phase);
8598
Linus Torvalds1da177e2005-04-16 15:20:36 -07008599 if (dev_proc_init())
8600 goto out;
8601
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008602 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07008603 goto out;
8604
8605 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08008606 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008607 INIT_LIST_HEAD(&ptype_base[i]);
8608
Vlad Yasevich62532da2012-11-15 08:49:10 +00008609 INIT_LIST_HEAD(&offload_base);
8610
Eric W. Biederman881d9662007-09-17 11:56:21 -07008611 if (register_pernet_subsys(&netdev_net_ops))
8612 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008613
8614 /*
8615 * Initialise the packet receive queues.
8616 */
8617
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07008618 for_each_possible_cpu(i) {
Eric Dumazet41852492016-08-26 12:50:39 -07008619 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008620 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008621
Eric Dumazet41852492016-08-26 12:50:39 -07008622 INIT_WORK(flush, flush_backlog);
8623
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008624 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07008625 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008626 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00008627 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00008628#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008629 sd->csd.func = rps_trigger_softirq;
8630 sd->csd.info = sd;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008631 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07008632#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00008633
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008634 sd->backlog.poll = process_backlog;
8635 sd->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008636 }
8637
Linus Torvalds1da177e2005-04-16 15:20:36 -07008638 dev_boot_phase = 0;
8639
Eric W. Biederman505d4f72008-11-07 22:54:20 -08008640	/* The loopback device is special: if any other network device
8641	 * is present in a network namespace, the loopback device must
8642	 * be present too. Since we now dynamically allocate and free the
8643	 * loopback device, ensure this invariant is maintained by
8644	 * keeping the loopback device as the first device on the
8645	 * list of network devices, ensuring the loopback device
8646	 * is the first device that appears and the last network device
8647	 * that disappears.
8648 */
8649 if (register_pernet_device(&loopback_net_ops))
8650 goto out;
8651
8652 if (register_pernet_device(&default_device_ops))
8653 goto out;
8654
Carlos R. Mafra962cf362008-05-15 11:15:37 -03008655 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
8656 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008657
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008658 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
8659 NULL, dev_cpu_dead);
8660 WARN_ON(rc < 0);
Thomas Graff38a9eb2015-07-21 10:43:56 +02008661 dst_subsys_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008662 rc = 0;
8663out:
8664 return rc;
8665}
8666
8667subsys_initcall(net_dev_init);