/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See register_netdevice() and unregister_netdevice() for example
 * usages; both must be called with the rtnl semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

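/*
 * Example (illustrative sketch, not from the original file): the two
 * legitimate reader-side patterns for walking the device list; the
 * my_handle_dev() callback name is hypothetical.
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		my_handle_dev(dev);	(may not sleep, no reference taken)
 *	rcu_read_unlock();
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		my_handle_dev(dev);
 *	read_unlock(&dev_base_lock);
 */
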
/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}
/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 * Add a protocol ID to the list. Now that the input handler is
 * smarter we can dispense with all the messy stuff that used to be
 * here.
 *
 * BEWARE!!! Protocol handlers, mangling input packets,
 * MUST BE last in hash buckets and checking protocol handlers
 * MUST start from promiscuous ptype_all chain in net_bh.
 * It is true now, do not change it.
 * Explanation follows: if a protocol handler that mangles a packet
 * were first on the list, it could not sense that the packet is
 * cloned and should be copied-on-write; it would change the packet
 * in place and subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	CPUs that are in the middle of receiving packets will see the
 *	new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

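/*
 * Example (illustrative sketch, hypothetical names): registering a tap
 * that sees every received frame, much as af_packet does for ETH_P_ALL:
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		...
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);
 *	...
 *	dev_remove_pack(&my_tap);	(sleeps, see below)
 */
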
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	CPUs that are in the middle of receiving packets will see the
 *	new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

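/*
 * Example (illustrative sketch, hypothetical names): since the list is
 * kept sorted by ->priority above, a GRO/GSO offload registration looks
 * like:
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.priority = 10,
 *		.callbacks = {
 *			.gso_segment = my_gso_segment,
 *			.gro_receive = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 */
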
/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

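/*
 * Example (illustrative): booting with
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * makes get_options() fill ints[] with the four numbers (ints[0] is the
 * count parsed) and leave "eth0" in str, so eth0 has irq 5 and I/O base
 * 0x340 saved for netdev_boot_setup_check() to apply at probe time.
 */
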
/*******************************************************************************

		    Device Interface Subroutines

*******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	a user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

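/*
 * Example (illustrative sketch): the RCU lookup only pins the device for
 * the duration of the read-side section; take a reference before leaving
 * it if the pointer must live longer:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		dev_hold(dev);
 *	rcu_read_unlock();
 *
 * which is exactly what dev_get_by_name() below does for you.
 */
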
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found, or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

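/*
 * Example (illustrative sketch): looking up an Ethernet device by MAC
 * address under RCU; "addr" stands for a hypothetical ETH_ALEN buffer.
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, addr);
 *	if (dev)
 *		dev_hold(dev);	(needed if dev is used after unlock)
 *	rcu_read_unlock();
 */
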
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work. We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

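/*
 * Example (illustrative): "eth0", "br-lan" and "wlp2s0" pass the checks
 * above; "", ".", "..", "a/b", "a:b", "a b" and any name of IFNAMSIZ
 * characters or more are rejected.
 */
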
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

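/*
 * Example (illustrative sketch): a driver that does not care about the
 * unit number typically calls
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *
 * which fills dev->name with the first free slot (eth0, eth1, ...) and
 * returns the unit number assigned, or a negative errno.
 */
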
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

1225/**
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001226 * dev_set_alias - change ifalias of a device
1227 * @dev: device
1228 * @alias: name up to IFALIASZ
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07001229 * @len: limit of bytes to copy from info
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001230 *
1231 * Set the ifalias for a device.
1232 */
1233int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1234{
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001235 char *new_ifalias;
1236
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001237 ASSERT_RTNL();
1238
1239 if (len >= IFALIASZ)
1240 return -EINVAL;
1241
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001242 if (!len) {
Sachin Kamat388dfc22012-11-20 00:57:04 +00001243 kfree(dev->ifalias);
1244 dev->ifalias = NULL;
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001245 return 0;
1246 }
1247
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001248 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1249 if (!new_ifalias)
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001250 return -ENOMEM;
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001251 dev->ifalias = new_ifalias;
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001252
1253 strlcpy(dev->ifalias, alias, len+1);
1254 return len;
1255}
1256
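/* Illustrative usage sketch (not part of this file): a subsystem that
 * already holds the RTNL lock, as dev_set_alias() asserts, can attach a
 * human-readable alias to a device. The alias string is hypothetical.
 */
static int example_set_ifalias(struct net_device *dev)
{
	int ret;

	ASSERT_RTNL();
	/* on success dev_set_alias() returns the number of bytes copied */
	ret = dev_set_alias(dev, "wan-uplink", strlen("wan-uplink"));
	return ret < 0 ? ret : 0;
}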
1257
1258/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001259 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001260 * @dev: device to cause notification
1261 *
1262 * Called to indicate a device has changed features.
1263 */
1264void netdev_features_change(struct net_device *dev)
1265{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001266 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001267}
1268EXPORT_SYMBOL(netdev_features_change);
1269
1270/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 * netdev_state_change - device changes state
1272 * @dev: device to cause notification
1273 *
1274 * Called to indicate a device has changed state. This function calls
1275 * the notifier chains for netdev_chain and sends a NEWLINK message
1276 * to the routing socket.
1277 */
1278void netdev_state_change(struct net_device *dev)
1279{
1280 if (dev->flags & IFF_UP) {
Loic Prylli54951192014-07-01 21:39:43 -07001281 struct netdev_notifier_change_info change_info;
1282
1283 change_info.flags_changed = 0;
1284 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1285 &change_info.info);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001286 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287 }
1288}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001289EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290
Amerigo Wangee89bab2012-08-09 22:14:56 +00001291/**
1292 * netdev_notify_peers - notify network peers about existence of @dev
1293 * @dev: network device
1294 *
1295 * Generate traffic such that interested network peers are aware of
1296 * @dev, such as by generating a gratuitous ARP. This may be used when
1297 * a device wants to inform the rest of the network about some sort of
1298 * reconfiguration such as a failover event or virtual machine
1299 * migration.
1300 */
1301void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001302{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001303 rtnl_lock();
1304 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1305 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001306}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001307EXPORT_SYMBOL(netdev_notify_peers);
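
/* Illustrative sketch: a virtual NIC driver announcing itself after a
 * failover or VM migration so that switches relearn its MAC address.
 * netdev_notify_peers() takes the RTNL lock itself, so it must be
 * called from process context without RTNL held.
 */
static void example_after_migration(struct net_device *dev)
{
	netdev_notify_peers(dev);	/* typically emits a gratuitous ARP */
}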
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001308
Patrick McHardybd380812010-02-26 06:34:53 +00001309static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001311 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001312 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001314 ASSERT_RTNL();
1315
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316 if (!netif_device_present(dev))
1317 return -ENODEV;
1318
Neil Hormanca99ca12013-02-05 08:05:43 +00001319 /* Block netpoll from trying to do any rx path servicing.
1320 * If we don't do this, there is a chance ndo_poll_controller
1321 * or ndo_poll may be running while we open the device.
1322 */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001323 netpoll_poll_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001324
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001325 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1326 ret = notifier_to_errno(ret);
1327 if (ret)
1328 return ret;
1329
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001331
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001332 if (ops->ndo_validate_addr)
1333 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001334
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001335 if (!ret && ops->ndo_open)
1336 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
Eric W. Biederman66b55522014-03-27 15:39:03 -07001338 netpoll_poll_enable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001339
Jeff Garzikbada3392007-10-23 20:19:37 -07001340 if (ret)
1341 clear_bit(__LINK_STATE_START, &dev->state);
1342 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 dev->flags |= IFF_UP;
Patrick McHardy4417da62007-06-27 01:28:10 -07001344 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001346 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001348
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 return ret;
1350}
Patrick McHardybd380812010-02-26 06:34:53 +00001351
1352/**
1353 * dev_open - prepare an interface for use.
1354 * @dev: device to open
1355 *
1356 * Takes a device from down to up state. The device's private open
1357 * function is invoked and then the multicast lists are loaded. Finally
1358 * the device is moved into the up state and a %NETDEV_UP message is
1359 * sent to the netdev notifier chain.
1360 *
1361 * Calling this function on an active interface is a nop. On a failure
1362 * a negative errno code is returned.
1363 */
1364int dev_open(struct net_device *dev)
1365{
1366 int ret;
1367
Patrick McHardybd380812010-02-26 06:34:53 +00001368 if (dev->flags & IFF_UP)
1369 return 0;
1370
Patrick McHardybd380812010-02-26 06:34:53 +00001371 ret = __dev_open(dev);
1372 if (ret < 0)
1373 return ret;
1374
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001375 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Patrick McHardybd380812010-02-26 06:34:53 +00001376 call_netdevice_notifiers(NETDEV_UP, dev);
1377
1378 return ret;
1379}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001380EXPORT_SYMBOL(dev_open);
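
/* Illustrative sketch: bringing a device up from kernel code. dev_open()
 * must run under RTNL; calling it on an already-up device returns 0.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);
	rtnl_unlock();

	return err;
}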
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
Octavian Purdila44345722010-12-13 12:44:07 +00001382static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383{
Octavian Purdila44345722010-12-13 12:44:07 +00001384 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001385
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001386 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001387 might_sleep();
1388
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001389 list_for_each_entry(dev, head, close_list) {
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001390 /* Temporarily disable netpoll until the interface is down */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001391 netpoll_poll_disable(dev);
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001392
Octavian Purdila44345722010-12-13 12:44:07 +00001393 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394
Octavian Purdila44345722010-12-13 12:44:07 +00001395 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396
Octavian Purdila44345722010-12-13 12:44:07 +00001397 /* Synchronize to scheduled poll. We cannot touch the poll list;
1398 * it can even be on a different CPU. So just clear netif_running().
1399 *
1400 * dev->stop() will invoke napi_disable() on all of its
1401 * napi_struct instances on this device.
1402 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001403 smp_mb__after_atomic(); /* Commit netif_running(). */
Octavian Purdila44345722010-12-13 12:44:07 +00001404 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
Octavian Purdila44345722010-12-13 12:44:07 +00001406 dev_deactivate_many(head);
1407
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001408 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001409 const struct net_device_ops *ops = dev->netdev_ops;
1410
1411 /*
1412 * Call the device specific close. This cannot fail.
1413 * Only if device is UP
1414 *
1415 * We allow it to be called even after a DETACH hot-plug
1416 * event.
1417 */
1418 if (ops->ndo_stop)
1419 ops->ndo_stop(dev);
1420
Octavian Purdila44345722010-12-13 12:44:07 +00001421 dev->flags &= ~IFF_UP;
Eric W. Biederman66b55522014-03-27 15:39:03 -07001422 netpoll_poll_enable(dev);
Octavian Purdila44345722010-12-13 12:44:07 +00001423 }
1424
1425 return 0;
1426}
1427
1428static int __dev_close(struct net_device *dev)
1429{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001430 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001431 LIST_HEAD(single);
1432
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001433 list_add(&dev->close_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001434 retval = __dev_close_many(&single);
1435 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001436
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001437 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001438}
1439
David S. Miller99c4a262015-03-18 22:52:33 -04001440int dev_close_many(struct list_head *head, bool unlink)
Octavian Purdila44345722010-12-13 12:44:07 +00001441{
1442 struct net_device *dev, *tmp;
Octavian Purdila44345722010-12-13 12:44:07 +00001443
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001444 /* Remove the devices that don't need to be closed */
1445 list_for_each_entry_safe(dev, tmp, head, close_list)
Octavian Purdila44345722010-12-13 12:44:07 +00001446 if (!(dev->flags & IFF_UP))
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001447 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001448
1449 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001450
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001451 list_for_each_entry_safe(dev, tmp, head, close_list) {
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001452 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Octavian Purdila44345722010-12-13 12:44:07 +00001453 call_netdevice_notifiers(NETDEV_DOWN, dev);
David S. Miller99c4a262015-03-18 22:52:33 -04001454 if (unlink)
1455 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001456 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 return 0;
1459}
David S. Miller99c4a262015-03-18 22:52:33 -04001460EXPORT_SYMBOL(dev_close_many);
Patrick McHardybd380812010-02-26 06:34:53 +00001461
1462/**
1463 * dev_close - shutdown an interface.
1464 * @dev: device to shutdown
1465 *
1466 * This function moves an active device into down state. A
1467 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1468 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1469 * chain.
1470 */
1471int dev_close(struct net_device *dev)
1472{
Eric Dumazete14a5992011-05-10 12:26:06 -07001473 if (dev->flags & IFF_UP) {
1474 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001475
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001476 list_add(&dev->close_list, &single);
David S. Miller99c4a262015-03-18 22:52:33 -04001477 dev_close_many(&single, true);
Eric Dumazete14a5992011-05-10 12:26:06 -07001478 list_del(&single);
1479 }
dingtianhongda6e3782013-05-27 19:53:31 +00001480 return 0;
Patrick McHardybd380812010-02-26 06:34:53 +00001481}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001482EXPORT_SYMBOL(dev_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
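/* Illustrative sketch: batching shutdown of several devices through
 * dev_close_many(), which lets dev_deactivate_many() amortize its
 * synchronization costs. Caller must hold RTNL; both devices here are
 * assumed to come from the caller.
 */
static void example_close_pair(struct net_device *a, struct net_device *b)
{
	LIST_HEAD(head);

	ASSERT_RTNL();
	list_add(&a->close_list, &head);
	list_add(&b->close_list, &head);
	dev_close_many(&head, true);	/* unlink=true empties the list */
}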
1484
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001485/**
1486 * dev_disable_lro - disable Large Receive Offload on a device
1487 * @dev: device
1488 *
1489 * Disable Large Receive Offload (LRO) on a net device. Must be
1490 * called under RTNL. This is needed if received packets may be
1491 * forwarded to another interface.
1492 */
1493void dev_disable_lro(struct net_device *dev)
1494{
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001495 struct net_device *lower_dev;
1496 struct list_head *iter;
Michal Kubeček529d0482013-11-15 06:18:50 +01001497
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001498 dev->wanted_features &= ~NETIF_F_LRO;
1499 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001500
Michał Mirosław22d59692011-04-21 12:42:15 +00001501 if (unlikely(dev->features & NETIF_F_LRO))
1502 netdev_WARN(dev, "failed to disable LRO!\n");
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001503
1504 netdev_for_each_lower_dev(dev, lower_dev, iter)
1505 dev_disable_lro(lower_dev);
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001506}
1507EXPORT_SYMBOL(dev_disable_lro);
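
/* Illustrative sketch: a forwarding setup (bridge/router-like) turning
 * LRO off before packets from this device may be forwarded, since
 * LRO-merged frames cannot be forwarded safely. The recursion into
 * lower devices above covers stacked setups such as bonding or VLANs.
 */
static void example_prepare_forwarding(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_disable_lro(dev);
}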
1508
Jiri Pirko351638e2013-05-28 01:30:21 +00001509static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1510 struct net_device *dev)
1511{
1512 struct netdev_notifier_info info;
1513
1514 netdev_notifier_info_init(&info, dev);
1515 return nb->notifier_call(nb, val, &info);
1516}
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001517
Eric W. Biederman881d9662007-09-17 11:56:21 -07001518static int dev_boot_phase = 1;
1519
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520/**
1521 * register_netdevice_notifier - register a network notifier block
1522 * @nb: notifier
1523 *
1524 * Register a notifier to be called when network device events occur.
1525 * The notifier passed is linked into the kernel structures and must
1526 * not be reused until it has been unregistered. A negative errno code
1527 * is returned on a failure.
1528 *
1529 * When registered, all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001530 * to the new notifier to allow the device to have a race-free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 * view of the network device list.
1532 */
1533
1534int register_netdevice_notifier(struct notifier_block *nb)
1535{
1536 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001537 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001538 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 int err;
1540
1541 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001542 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001543 if (err)
1544 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001545 if (dev_boot_phase)
1546 goto unlock;
1547 for_each_net(net) {
1548 for_each_netdev(net, dev) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001549 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001550 err = notifier_to_errno(err);
1551 if (err)
1552 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
Eric W. Biederman881d9662007-09-17 11:56:21 -07001554 if (!(dev->flags & IFF_UP))
1555 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001556
Jiri Pirko351638e2013-05-28 01:30:21 +00001557 call_netdevice_notifier(nb, NETDEV_UP, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001558 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001560
1561unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 rtnl_unlock();
1563 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001564
1565rollback:
1566 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001567 for_each_net(net) {
1568 for_each_netdev(net, dev) {
1569 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001570 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001571
Eric W. Biederman881d9662007-09-17 11:56:21 -07001572 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001573 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1574 dev);
1575 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001576 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001577 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001578 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001579 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001580
RongQing.Li8f891482011-11-30 23:43:07 -05001581outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001582 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001583 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001585EXPORT_SYMBOL(register_netdevice_notifier);
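
/* Illustrative sketch: a minimal netdevice notifier. Because
 * registration replays NETDEV_REGISTER (and NETDEV_UP for running
 * devices), the callback also sees devices that existed before it was
 * registered. All example_* names are hypothetical.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s: up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_info("%s: going down\n", dev->name);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* A module would pair register_netdevice_notifier(&example_netdev_nb)
 * in its init path with unregister_netdevice_notifier() on exit.
 */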
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586
1587/**
1588 * unregister_netdevice_notifier - unregister a network notifier block
1589 * @nb: notifier
1590 *
1591 * Unregister a notifier previously registered by
1592 * register_netdevice_notifier(). The notifier is unlinked from the
1593 * kernel structures and may then be reused. A negative errno code
1594 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001595 *
1596 * After unregistering, unregister and down device events are synthesized
1597 * for all devices on the device list to the removed notifier to remove
1598 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 */
1600
1601int unregister_netdevice_notifier(struct notifier_block *nb)
1602{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001603 struct net_device *dev;
1604 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001605 int err;
1606
1607 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001608 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001609 if (err)
1610 goto unlock;
1611
1612 for_each_net(net) {
1613 for_each_netdev(net, dev) {
1614 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001615 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1616 dev);
1617 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001618 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001619 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001620 }
1621 }
1622unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001623 rtnl_unlock();
1624 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001626EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627
1628/**
Jiri Pirko351638e2013-05-28 01:30:21 +00001629 * call_netdevice_notifiers_info - call all network notifier blocks
1630 * @val: value passed unmodified to notifier function
1631 * @dev: net_device pointer passed unmodified to notifier function
1632 * @info: notifier information data
1633 *
1634 * Call all network notifier blocks. Parameters and return value
1635 * are as for raw_notifier_call_chain().
1636 */
1637
stephen hemminger1d143d92013-12-29 14:01:29 -08001638static int call_netdevice_notifiers_info(unsigned long val,
1639 struct net_device *dev,
1640 struct netdev_notifier_info *info)
Jiri Pirko351638e2013-05-28 01:30:21 +00001641{
1642 ASSERT_RTNL();
1643 netdev_notifier_info_init(info, dev);
1644 return raw_notifier_call_chain(&netdev_chain, val, info);
1645}
Jiri Pirko351638e2013-05-28 01:30:21 +00001646
1647/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 * call_netdevice_notifiers - call all network notifier blocks
1649 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001650 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 *
1652 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001653 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 */
1655
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001656int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657{
Jiri Pirko351638e2013-05-28 01:30:21 +00001658 struct netdev_notifier_info info;
1659
1660 return call_netdevice_notifiers_info(val, dev, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001662EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663
Pablo Neira1cf519002015-05-13 18:19:37 +02001664#ifdef CONFIG_NET_INGRESS
Daniel Borkmann45771392015-04-10 23:07:54 +02001665static struct static_key ingress_needed __read_mostly;
1666
1667void net_inc_ingress_queue(void)
1668{
1669 static_key_slow_inc(&ingress_needed);
1670}
1671EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1672
1673void net_dec_ingress_queue(void)
1674{
1675 static_key_slow_dec(&ingress_needed);
1676}
1677EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1678#endif
1679
Daniel Borkmann1f211a12016-01-07 22:29:47 +01001680#ifdef CONFIG_NET_EGRESS
1681static struct static_key egress_needed __read_mostly;
1682
1683void net_inc_egress_queue(void)
1684{
1685 static_key_slow_inc(&egress_needed);
1686}
1687EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1688
1689void net_dec_egress_queue(void)
1690{
1691 static_key_slow_dec(&egress_needed);
1692}
1693EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1694#endif
1695
Ingo Molnarc5905af2012-02-24 08:31:31 +01001696static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001697#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001698/* We are not allowed to call static_key_slow_dec() from irq context
Eric Dumazetb90e5792011-11-28 11:16:50 +00001699 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001700 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001701 */
1702static atomic_t netstamp_needed_deferred;
1703#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
1705void net_enable_timestamp(void)
1706{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001707#ifdef HAVE_JUMP_LABEL
1708 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1709
1710 if (deferred) {
1711 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001712 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001713 return;
1714 }
1715#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001716 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001718EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719
1720void net_disable_timestamp(void)
1721{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001722#ifdef HAVE_JUMP_LABEL
1723 if (in_interrupt()) {
1724 atomic_inc(&netstamp_needed_deferred);
1725 return;
1726 }
1727#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001728 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001730EXPORT_SYMBOL(net_disable_timestamp);
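
/* Illustrative sketch: the enable/disable pair guarding the static key
 * above. A feature that needs packet timestamps (packet sockets,
 * SO_TIMESTAMP) enables on setup; the disable side is irq-safe thanks
 * to the deferred decrement.
 */
static void example_timestamp_feature(bool on)
{
	if (on)
		net_enable_timestamp();
	else
		net_disable_timestamp();	/* may defer the key decrement */
}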
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
Eric Dumazet3b098e22010-05-15 23:57:10 -07001732static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733{
Thomas Gleixner2456e852016-12-25 11:38:40 +01001734 skb->tstamp = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001735 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001736 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737}
1738
Eric Dumazet588f0332011-11-15 04:12:55 +00001739#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001740 if (static_key_false(&netstamp_needed)) { \
Thomas Gleixner2456e852016-12-25 11:38:40 +01001741 if ((COND) && !(SKB)->tstamp) \
Eric Dumazet588f0332011-11-15 04:12:55 +00001742 __net_timestamp(SKB); \
1743 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001744
Nikolay Aleksandrovf4b05d22016-04-28 17:59:28 +02001745bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001746{
1747 unsigned int len;
1748
1749 if (!(dev->flags & IFF_UP))
1750 return false;
1751
1752 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1753 if (skb->len <= len)
1754 return true;
1755
1756 /* if TSO is enabled, we don't care about the length as the packet
1757 * could be forwarded without being segmented beforehand
1758 */
1759 if (skb_is_gso(skb))
1760 return true;
1761
1762 return false;
1763}
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04001764EXPORT_SYMBOL_GPL(is_skb_forwardable);
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001765
Herbert Xua0265d22014-04-17 13:45:03 +08001766int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1767{
Martin KaFai Lau4e3264d2016-11-09 15:36:33 -08001768 int ret = ____dev_forward_skb(dev, skb);
1769
1770 if (likely(!ret)) {
1771 skb->protocol = eth_type_trans(skb, dev);
1772 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
Herbert Xua0265d22014-04-17 13:45:03 +08001773 }
1774
Martin KaFai Lau4e3264d2016-11-09 15:36:33 -08001775 return ret;
Herbert Xua0265d22014-04-17 13:45:03 +08001776}
1777EXPORT_SYMBOL_GPL(__dev_forward_skb);
1778
Arnd Bergmann44540962009-11-26 06:07:08 +00001779/**
1780 * dev_forward_skb - loopback an skb to another netif
1781 *
1782 * @dev: destination network device
1783 * @skb: buffer to forward
1784 *
1785 * return values:
1786 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001787 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001788 *
1789 * dev_forward_skb can be used for injecting an skb from the
1790 * start_xmit function of one device into the receive queue
1791 * of another device.
1792 *
1793 * The receiving device may be in another namespace, so
1794 * we have to clear all information in the skb that could
1795 * impact namespace isolation.
1796 */
1797int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1798{
Herbert Xua0265d22014-04-17 13:45:03 +08001799 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001800}
1801EXPORT_SYMBOL_GPL(dev_forward_skb);
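
/* Illustrative sketch: a veth-like driver's transmit routine injecting
 * the skb into a peer device's receive path. The peer lookup is
 * hypothetical; on NET_RX_DROP the skb has already been freed.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);	/* hypothetical */

	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}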
1802
Changli Gao71d9dec2010-12-15 19:57:25 +00001803static inline int deliver_skb(struct sk_buff *skb,
1804 struct packet_type *pt_prev,
1805 struct net_device *orig_dev)
1806{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001807 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1808 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001809 atomic_inc(&skb->users);
1810 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1811}
1812
Salam Noureddine7866a622015-01-27 11:35:48 -08001813static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1814 struct packet_type **pt,
Jiri Pirkofbcb2172015-03-30 16:56:01 +02001815 struct net_device *orig_dev,
1816 __be16 type,
Salam Noureddine7866a622015-01-27 11:35:48 -08001817 struct list_head *ptype_list)
1818{
1819 struct packet_type *ptype, *pt_prev = *pt;
1820
1821 list_for_each_entry_rcu(ptype, ptype_list, list) {
1822 if (ptype->type != type)
1823 continue;
1824 if (pt_prev)
Jiri Pirkofbcb2172015-03-30 16:56:01 +02001825 deliver_skb(skb, pt_prev, orig_dev);
Salam Noureddine7866a622015-01-27 11:35:48 -08001826 pt_prev = ptype;
1827 }
1828 *pt = pt_prev;
1829}
1830
Eric Leblondc0de08d2012-08-16 22:02:58 +00001831static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1832{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001833 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001834 return false;
1835
1836 if (ptype->id_match)
1837 return ptype->id_match(ptype, skb->sk);
1838 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1839 return true;
1840
1841 return false;
1842}
1843
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844/*
1845 * Support routine. Sends outgoing frames to any network
1846 * taps currently in use.
1847 */
1848
David Ahern74b20582016-05-10 11:19:50 -07001849void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850{
1851 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001852 struct sk_buff *skb2 = NULL;
1853 struct packet_type *pt_prev = NULL;
Salam Noureddine7866a622015-01-27 11:35:48 -08001854 struct list_head *ptype_list = &ptype_all;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001855
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 rcu_read_lock();
Salam Noureddine7866a622015-01-27 11:35:48 -08001857again:
1858 list_for_each_entry_rcu(ptype, ptype_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 /* Never send packets back to the socket
1860 * they originated from - MvS (miquels@drinkel.ow.org)
1861 */
Salam Noureddine7866a622015-01-27 11:35:48 -08001862 if (skb_loop_sk(ptype, skb))
1863 continue;
Changli Gao71d9dec2010-12-15 19:57:25 +00001864
Salam Noureddine7866a622015-01-27 11:35:48 -08001865 if (pt_prev) {
1866 deliver_skb(skb2, pt_prev, skb->dev);
Changli Gao71d9dec2010-12-15 19:57:25 +00001867 pt_prev = ptype;
Salam Noureddine7866a622015-01-27 11:35:48 -08001868 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 }
Salam Noureddine7866a622015-01-27 11:35:48 -08001870
1871 /* need to clone skb, done only once */
1872 skb2 = skb_clone(skb, GFP_ATOMIC);
1873 if (!skb2)
1874 goto out_unlock;
1875
1876 net_timestamp_set(skb2);
1877
1878 /* skb->nh should be correctly
1879 * set by the sender, so that the second statement is
1880 * just protection against buggy protocols.
1881 */
1882 skb_reset_mac_header(skb2);
1883
1884 if (skb_network_header(skb2) < skb2->data ||
1885 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1886 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1887 ntohs(skb2->protocol),
1888 dev->name);
1889 skb_reset_network_header(skb2);
1890 }
1891
1892 skb2->transport_header = skb2->network_header;
1893 skb2->pkt_type = PACKET_OUTGOING;
1894 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 }
Salam Noureddine7866a622015-01-27 11:35:48 -08001896
1897 if (ptype_list == &ptype_all) {
1898 ptype_list = &dev->ptype_all;
1899 goto again;
1900 }
1901out_unlock:
Changli Gao71d9dec2010-12-15 19:57:25 +00001902 if (pt_prev)
1903 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 rcu_read_unlock();
1905}
David Ahern74b20582016-05-10 11:19:50 -07001906EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907
Ben Hutchings2c530402012-07-10 10:55:09 +00001908/**
1909 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001910 * @dev: Network device
1911 * @txq: number of queues available
1912 *
1913 * If real_num_tx_queues is changed the tc mappings may no longer be
1914 * valid. To resolve this verify the tc mapping remains valid and if
1915 * not, NULL the mapping. With no priorities mapping to this
1916 * offset/count pair it will no longer be used. In the worst case, if TC0
1917 * is invalid, nothing can be done, so disable priority mappings. It is
1918 * expected that drivers will fix this mapping if they can before
1919 * calling netif_set_real_num_tx_queues.
1920 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001921static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001922{
1923 int i;
1924 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1925
1926 /* If TC0 is invalidated disable TC mapping */
1927 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001928 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001929 dev->num_tc = 0;
1930 return;
1931 }
1932
1933 /* Invalidated prio to tc mappings set to TC0 */
1934 for (i = 1; i < TC_BITMASK + 1; i++) {
1935 int q = netdev_get_prio_tc_map(dev, i);
1936
1937 tc = &dev->tc_to_txq[q];
1938 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001939 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1940 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001941 netdev_set_prio_tc_map(dev, i, 0);
1942 }
1943 }
1944}
1945
Alexander Duyck8d059b02016-10-28 11:43:49 -04001946int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
1947{
1948 if (dev->num_tc) {
1949 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1950 int i;
1951
1952 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
1953 if ((txq - tc->offset) < tc->count)
1954 return i;
1955 }
1956
1957 return -1;
1958 }
1959
1960 return 0;
1961}
1962
Alexander Duyck537c00d2013-01-10 08:57:02 +00001963#ifdef CONFIG_XPS
1964static DEFINE_MUTEX(xps_map_mutex);
1965#define xmap_dereference(P) \
1966 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1967
Alexander Duyck6234f872016-10-28 11:46:49 -04001968static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
1969 int tci, u16 index)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001970{
1971 struct xps_map *map = NULL;
1972 int pos;
1973
1974 if (dev_maps)
Alexander Duyck6234f872016-10-28 11:46:49 -04001975 map = xmap_dereference(dev_maps->cpu_map[tci]);
1976 if (!map)
1977 return false;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001978
Alexander Duyck6234f872016-10-28 11:46:49 -04001979 for (pos = map->len; pos--;) {
1980 if (map->queues[pos] != index)
1981 continue;
1982
1983 if (map->len > 1) {
1984 map->queues[pos] = map->queues[--map->len];
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001985 break;
1986 }
Alexander Duyck6234f872016-10-28 11:46:49 -04001987
1988 RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
1989 kfree_rcu(map, rcu);
1990 return false;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001991 }
1992
Alexander Duyck6234f872016-10-28 11:46:49 -04001993 return true;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001994}
1995
Alexander Duyck6234f872016-10-28 11:46:49 -04001996static bool remove_xps_queue_cpu(struct net_device *dev,
1997 struct xps_dev_maps *dev_maps,
1998 int cpu, u16 offset, u16 count)
1999{
Alexander Duyck184c4492016-10-28 11:50:13 -04002000 int num_tc = dev->num_tc ? : 1;
2001 bool active = false;
2002 int tci;
Alexander Duyck6234f872016-10-28 11:46:49 -04002003
Alexander Duyck184c4492016-10-28 11:50:13 -04002004 for (tci = cpu * num_tc; num_tc--; tci++) {
2005 int i, j;
2006
2007 for (i = count, j = offset; i--; j++) {
2008 if (!remove_xps_queue(dev_maps, tci, j))
2009 break;
2010 }
2011
2012 active |= i < 0;
Alexander Duyck6234f872016-10-28 11:46:49 -04002013 }
2014
Alexander Duyck184c4492016-10-28 11:50:13 -04002015 return active;
Alexander Duyck6234f872016-10-28 11:46:49 -04002016}
2017
2018static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2019 u16 count)
Alexander Duyck537c00d2013-01-10 08:57:02 +00002020{
2021 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00002022 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002023 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002024
2025 mutex_lock(&xps_map_mutex);
2026 dev_maps = xmap_dereference(dev->xps_maps);
2027
2028 if (!dev_maps)
2029 goto out_no_maps;
2030
Alexander Duyck6234f872016-10-28 11:46:49 -04002031 for_each_possible_cpu(cpu)
2032 active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
2033 offset, count);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002034
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002035 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00002036 RCU_INIT_POINTER(dev->xps_maps, NULL);
2037 kfree_rcu(dev_maps, rcu);
2038 }
2039
Alexander Duyck6234f872016-10-28 11:46:49 -04002040 for (i = offset + (count - 1); count--; i--)
Alexander Duyck024e9672013-01-10 08:57:46 +00002041 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
2042 NUMA_NO_NODE);
2043
Alexander Duyck537c00d2013-01-10 08:57:02 +00002044out_no_maps:
2045 mutex_unlock(&xps_map_mutex);
2046}
2047
Alexander Duyck6234f872016-10-28 11:46:49 -04002048static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2049{
2050 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2051}
2052
Alexander Duyck01c5f862013-01-10 08:57:35 +00002053static struct xps_map *expand_xps_map(struct xps_map *map,
2054 int cpu, u16 index)
2055{
2056 struct xps_map *new_map;
2057 int alloc_len = XPS_MIN_MAP_ALLOC;
2058 int i, pos;
2059
2060 for (pos = 0; map && pos < map->len; pos++) {
2061 if (map->queues[pos] != index)
2062 continue;
2063 return map;
2064 }
2065
2066 /* Need to add queue to this CPU's existing map */
2067 if (map) {
2068 if (pos < map->alloc_len)
2069 return map;
2070
2071 alloc_len = map->alloc_len * 2;
2072 }
2073
2074 /* Need to allocate new map to store queue on this CPU's map */
2075 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2076 cpu_to_node(cpu));
2077 if (!new_map)
2078 return NULL;
2079
2080 for (i = 0; i < pos; i++)
2081 new_map->queues[i] = map->queues[i];
2082 new_map->alloc_len = alloc_len;
2083 new_map->len = pos;
2084
2085 return new_map;
2086}
2087
Michael S. Tsirkin35735402013-10-02 09:14:06 +03002088int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2089 u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00002090{
Alexander Duyck01c5f862013-01-10 08:57:35 +00002091 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck184c4492016-10-28 11:50:13 -04002092 int i, cpu, tci, numa_node_id = -2;
2093 int maps_sz, num_tc = 1, tc = 0;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002094 struct xps_map *map, *new_map;
Alexander Duyck01c5f862013-01-10 08:57:35 +00002095 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002096
Alexander Duyck184c4492016-10-28 11:50:13 -04002097 if (dev->num_tc) {
2098 num_tc = dev->num_tc;
2099 tc = netdev_txq_to_tc(dev, index);
2100 if (tc < 0)
2101 return -EINVAL;
2102 }
2103
2104 maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
2105 if (maps_sz < L1_CACHE_BYTES)
2106 maps_sz = L1_CACHE_BYTES;
2107
Alexander Duyck537c00d2013-01-10 08:57:02 +00002108 mutex_lock(&xps_map_mutex);
2109
2110 dev_maps = xmap_dereference(dev->xps_maps);
2111
Alexander Duyck01c5f862013-01-10 08:57:35 +00002112 /* allocate memory for queue storage */
Alexander Duyck184c4492016-10-28 11:50:13 -04002113 for_each_cpu_and(cpu, cpu_online_mask, mask) {
Alexander Duyck01c5f862013-01-10 08:57:35 +00002114 if (!new_dev_maps)
2115 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002116 if (!new_dev_maps) {
2117 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002118 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002119 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002120
Alexander Duyck184c4492016-10-28 11:50:13 -04002121 tci = cpu * num_tc + tc;
2122 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
Alexander Duyck01c5f862013-01-10 08:57:35 +00002123 NULL;
2124
2125 map = expand_xps_map(map, cpu, index);
2126 if (!map)
2127 goto error;
2128
Alexander Duyck184c4492016-10-28 11:50:13 -04002129 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002130 }
2131
2132 if (!new_dev_maps)
2133 goto out_no_new_maps;
2134
2135 for_each_possible_cpu(cpu) {
Alexander Duyck184c4492016-10-28 11:50:13 -04002136 /* copy maps belonging to foreign traffic classes */
2137 for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
2138 /* fill in the new device map from the old device map */
2139 map = xmap_dereference(dev_maps->cpu_map[tci]);
2140 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2141 }
2142
2143 /* We need to explicitly update tci as the previous loop
2144 * could break out early if dev_maps is NULL.
2145 */
2146 tci = cpu * num_tc + tc;
2147
Alexander Duyck01c5f862013-01-10 08:57:35 +00002148 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2149 /* add queue to CPU maps */
2150 int pos = 0;
2151
Alexander Duyck184c4492016-10-28 11:50:13 -04002152 map = xmap_dereference(new_dev_maps->cpu_map[tci]);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002153 while ((pos < map->len) && (map->queues[pos] != index))
2154 pos++;
2155
2156 if (pos == map->len)
2157 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002158#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00002159 if (numa_node_id == -2)
2160 numa_node_id = cpu_to_node(cpu);
2161 else if (numa_node_id != cpu_to_node(cpu))
2162 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002163#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00002164 } else if (dev_maps) {
2165 /* fill in the new device map from the old device map */
Alexander Duyck184c4492016-10-28 11:50:13 -04002166 map = xmap_dereference(dev_maps->cpu_map[tci]);
2167 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002168 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002169
Alexander Duyck184c4492016-10-28 11:50:13 -04002170 /* copy maps belonging to foreign traffic classes */
2171 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2172 /* fill in the new device map from the old device map */
2173 map = xmap_dereference(dev_maps->cpu_map[tci]);
2174 RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
2175 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002176 }
2177
Alexander Duyck01c5f862013-01-10 08:57:35 +00002178 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2179
Alexander Duyck537c00d2013-01-10 08:57:02 +00002180 /* Cleanup old maps */
Alexander Duyck184c4492016-10-28 11:50:13 -04002181 if (!dev_maps)
2182 goto out_no_old_maps;
2183
2184 for_each_possible_cpu(cpu) {
2185 for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
2186 new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2187 map = xmap_dereference(dev_maps->cpu_map[tci]);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002188 if (map && map != new_map)
2189 kfree_rcu(map, rcu);
2190 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002191 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002192
Alexander Duyck184c4492016-10-28 11:50:13 -04002193 kfree_rcu(dev_maps, rcu);
2194
2195out_no_old_maps:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002196 dev_maps = new_dev_maps;
2197 active = true;
2198
2199out_no_new_maps:
2200 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00002201 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2202 (numa_node_id >= 0) ? numa_node_id :
2203 NUMA_NO_NODE);
2204
Alexander Duyck01c5f862013-01-10 08:57:35 +00002205 if (!dev_maps)
2206 goto out_no_maps;
2207
2208 /* removes queue from unused CPUs */
2209 for_each_possible_cpu(cpu) {
Alexander Duyck184c4492016-10-28 11:50:13 -04002210 for (i = tc, tci = cpu * num_tc; i--; tci++)
2211 active |= remove_xps_queue(dev_maps, tci, index);
2212 if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
2213 active |= remove_xps_queue(dev_maps, tci, index);
2214 for (i = num_tc - tc, tci++; --i; tci++)
2215 active |= remove_xps_queue(dev_maps, tci, index);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002216 }
2217
2218 /* free map if not active */
2219 if (!active) {
2220 RCU_INIT_POINTER(dev->xps_maps, NULL);
2221 kfree_rcu(dev_maps, rcu);
2222 }
2223
2224out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00002225 mutex_unlock(&xps_map_mutex);
2226
2227 return 0;
2228error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002229 /* remove any maps that we added */
2230 for_each_possible_cpu(cpu) {
Alexander Duyck184c4492016-10-28 11:50:13 -04002231 for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
2232 new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
2233 map = dev_maps ?
2234 xmap_dereference(dev_maps->cpu_map[tci]) :
2235 NULL;
2236 if (new_map && new_map != map)
2237 kfree(new_map);
2238 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002239 }
2240
Alexander Duyck537c00d2013-01-10 08:57:02 +00002241 mutex_unlock(&xps_map_mutex);
2242
Alexander Duyck537c00d2013-01-10 08:57:02 +00002243 kfree(new_dev_maps);
2244 return -ENOMEM;
2245}
2246EXPORT_SYMBOL(netif_set_xps_queue);
2247
2248#endif
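
/* Illustrative sketch (CONFIG_XPS): a multiqueue driver steering each
 * transmit queue to one CPU, e.g. mirroring its IRQ affinity. The
 * modulo spread over online CPUs is only an example policy.
 */
static void example_setup_xps(struct net_device *dev)
{
	u16 qid;

	for (qid = 0; qid < dev->real_num_tx_queues; qid++)
		netif_set_xps_queue(dev, cpumask_of(qid % num_online_cpus()),
				    qid);
}
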
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002249void netdev_reset_tc(struct net_device *dev)
2250{
Alexander Duyck6234f872016-10-28 11:46:49 -04002251#ifdef CONFIG_XPS
2252 netif_reset_xps_queues_gt(dev, 0);
2253#endif
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002254 dev->num_tc = 0;
2255 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2256 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2257}
2258EXPORT_SYMBOL(netdev_reset_tc);
2259
2260int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2261{
2262 if (tc >= dev->num_tc)
2263 return -EINVAL;
2264
Alexander Duyck6234f872016-10-28 11:46:49 -04002265#ifdef CONFIG_XPS
2266 netif_reset_xps_queues(dev, offset, count);
2267#endif
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002268 dev->tc_to_txq[tc].count = count;
2269 dev->tc_to_txq[tc].offset = offset;
2270 return 0;
2271}
2272EXPORT_SYMBOL(netdev_set_tc_queue);
2273
2274int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2275{
2276 if (num_tc > TC_MAX_QUEUE)
2277 return -EINVAL;
2278
Alexander Duyck6234f872016-10-28 11:46:49 -04002279#ifdef CONFIG_XPS
2280 netif_reset_xps_queues_gt(dev, 0);
2281#endif
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002282 dev->num_tc = num_tc;
2283 return 0;
2284}
2285EXPORT_SYMBOL(netdev_set_num_tc);
2286
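/* Illustrative sketch: a driver exposing two traffic classes over eight
 * tx queues, four per class, as an mqprio-style offload would. The
 * queue split is hypothetical; priorities still map to TC0 unless
 * netdev_set_prio_tc_map() is called as well.
 */
static int example_setup_tc(struct net_device *dev)
{
	int err;

	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;

	err = netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0: queues 0-3 */
	if (!err)
		err = netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1: queues 4-7 */
	if (err)
		netdev_reset_tc(dev);

	return err;
}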
John Fastabendf0796d52010-07-01 13:21:57 +00002287/*
2288 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2289 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2290 */
Tom Herberte6484932010-10-18 18:04:39 +00002291int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002292{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002293 int rc;
2294
Tom Herberte6484932010-10-18 18:04:39 +00002295 if (txq < 1 || txq > dev->num_tx_queues)
2296 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002297
Ben Hutchings5c565802011-02-15 19:39:21 +00002298 if (dev->reg_state == NETREG_REGISTERED ||
2299 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002300 ASSERT_RTNL();
2301
Tom Herbert1d24eb42010-11-21 13:17:27 +00002302 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2303 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002304 if (rc)
2305 return rc;
2306
John Fastabend4f57c082011-01-17 08:06:04 +00002307 if (dev->num_tc)
2308 netif_setup_tc(dev, txq);
2309
Alexander Duyck024e9672013-01-10 08:57:46 +00002310 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002311 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002312#ifdef CONFIG_XPS
2313 netif_reset_xps_queues_gt(dev, txq);
2314#endif
2315 }
John Fastabendf0796d52010-07-01 13:21:57 +00002316 }
Tom Herberte6484932010-10-18 18:04:39 +00002317
2318 dev->real_num_tx_queues = txq;
2319 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002320}
2321EXPORT_SYMBOL(netif_set_real_num_tx_queues);
Denis Vlasenko56079432006-03-29 15:57:29 -08002322
Michael Daltona953be52014-01-16 22:23:28 -08002323#ifdef CONFIG_SYSFS
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002324/**
2325 * netif_set_real_num_rx_queues - set actual number of RX queues used
2326 * @dev: Network device
2327 * @rxq: Actual number of RX queues
2328 *
2329 * This must be called either with the rtnl_lock held or before
2330 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002331 * negative error code. If called before registration, it always
2332 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002333 */
2334int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2335{
2336 int rc;
2337
Tom Herbertbd25fa72010-10-18 18:00:16 +00002338 if (rxq < 1 || rxq > dev->num_rx_queues)
2339 return -EINVAL;
2340
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002341 if (dev->reg_state == NETREG_REGISTERED) {
2342 ASSERT_RTNL();
2343
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002344 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2345 rxq);
2346 if (rc)
2347 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002348 }
2349
2350 dev->real_num_rx_queues = rxq;
2351 return 0;
2352}
2353EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2354#endif
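
/* Illustrative sketch: a driver that allocated the maximum queue count
 * at probe time but obtained fewer MSI-X vectors shrinking the active
 * tx/rx sets. Needs RTNL once the device is registered; "nvec" is a
 * hypothetical vector count.
 */
static int example_resize_queues(struct net_device *dev, unsigned int nvec)
{
	int err;

	rtnl_lock();
	err = netif_set_real_num_tx_queues(dev, nvec);
	if (!err)
		err = netif_set_real_num_rx_queues(dev, nvec);
	rtnl_unlock();

	return err;
}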
2355
Ben Hutchings2c530402012-07-10 10:55:09 +00002356/**
2357 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002358 *
2359 * This routine should set an upper limit on the number of RSS queues
2360 * used by default by multiqueue devices.
2361 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002362int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002363{
Hariprasad Shenai40e4e712016-06-08 18:09:08 +05302364 return is_kdump_kernel() ?
2365 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
Yuval Mintz16917b82012-07-01 03:18:50 +00002366}
2367EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2368
Eric Dumazet3bcb8462016-06-04 20:02:28 -07002369static void __netif_reschedule(struct Qdisc *q)
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002370{
2371 struct softnet_data *sd;
2372 unsigned long flags;
2373
2374 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05002375 sd = this_cpu_ptr(&softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002376 q->next_sched = NULL;
2377 *sd->output_queue_tailp = q;
2378 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002379 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2380 local_irq_restore(flags);
2381}
2382
David S. Miller37437bb2008-07-16 02:15:04 -07002383void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002384{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002385 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2386 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002387}
2388EXPORT_SYMBOL(__netif_schedule);
2389
Eric Dumazete6247022013-12-05 04:45:08 -08002390struct dev_kfree_skb_cb {
2391 enum skb_free_reason reason;
2392};
2393
2394static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002395{
Eric Dumazete6247022013-12-05 04:45:08 -08002396 return (struct dev_kfree_skb_cb *)skb->cb;
Denis Vlasenko56079432006-03-29 15:57:29 -08002397}
Denis Vlasenko56079432006-03-29 15:57:29 -08002398
John Fastabend46e5da42014-09-12 20:04:52 -07002399void netif_schedule_queue(struct netdev_queue *txq)
2400{
2401 rcu_read_lock();
2402 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2403 struct Qdisc *q = rcu_dereference(txq->qdisc);
2404
2405 __netif_schedule(q);
2406 }
2407 rcu_read_unlock();
2408}
2409EXPORT_SYMBOL(netif_schedule_queue);
2410
John Fastabend46e5da42014-09-12 20:04:52 -07002411void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2412{
2413 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2414 struct Qdisc *q;
2415
2416 rcu_read_lock();
2417 q = rcu_dereference(dev_queue->qdisc);
2418 __netif_schedule(q);
2419 rcu_read_unlock();
2420 }
2421}
2422EXPORT_SYMBOL(netif_tx_wake_queue);
2423
Eric Dumazete6247022013-12-05 04:45:08 -08002424void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2425{
2426 unsigned long flags;
2427
2428 if (likely(atomic_read(&skb->users) == 1)) {
2429 smp_rmb();
2430 atomic_set(&skb->users, 0);
2431 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2432 return;
2433 }
2434 get_kfree_skb_cb(skb)->reason = reason;
2435 local_irq_save(flags);
2436 skb->next = __this_cpu_read(softnet_data.completion_queue);
2437 __this_cpu_write(softnet_data.completion_queue, skb);
2438 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2439 local_irq_restore(flags);
2440}
2441EXPORT_SYMBOL(__dev_kfree_skb_irq);
2442
2443void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
Denis Vlasenko56079432006-03-29 15:57:29 -08002444{
2445 if (in_irq() || irqs_disabled())
Eric Dumazete6247022013-12-05 04:45:08 -08002446 __dev_kfree_skb_irq(skb, reason);
Denis Vlasenko56079432006-03-29 15:57:29 -08002447 else
2448 dev_kfree_skb(skb);
2449}
Eric Dumazete6247022013-12-05 04:45:08 -08002450EXPORT_SYMBOL(__dev_kfree_skb_any);
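
/* Illustrative sketch: completing tx skbs from a driver's interrupt
 * handler. The *_any() variants pick the irq-safe path automatically,
 * deferring the actual free to the NET_TX softirq when necessary.
 */
static void example_tx_complete(struct sk_buff *skb, bool errored)
{
	if (errored)
		dev_kfree_skb_any(skb);		/* accounted as dropped */
	else
		dev_consume_skb_any(skb);	/* normal completion */
}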
Denis Vlasenko56079432006-03-29 15:57:29 -08002451
2452
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002453/**
2454 * netif_device_detach - mark device as removed
2455 * @dev: network device
2456 *
2457 * Mark device as removed from the system and therefore no longer available.
2458 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002459void netif_device_detach(struct net_device *dev)
2460{
2461 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2462 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002463 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002464 }
2465}
2466EXPORT_SYMBOL(netif_device_detach);
2467
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002468/**
2469 * netif_device_attach - mark device as attached
2470 * @dev: network device
2471 *
2472 * Mark device as attached to the system and restart it if needed.
2473 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002474void netif_device_attach(struct net_device *dev)
2475{
2476 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2477 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002478 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002479 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002480 }
2481}
2482EXPORT_SYMBOL(netif_device_attach);
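
/* Illustrative sketch: the usual suspend/resume pairing. Detach stops
 * every tx queue and marks the device absent so the stack leaves it
 * alone; attach restarts the queues and watchdog if it was running.
 */
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);
	/* ... quiesce DMA and save hardware state ... */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	/* ... restore hardware state ... */
	netif_device_attach(dev);
	return 0;
}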
2483
Jiri Pirko5605c762015-05-12 14:56:12 +02002484/*
2485 * Returns a Tx hash based on the given packet descriptor and the number of
2486 * Tx queues to be used as a distribution range.
2487 */
2488u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2489 unsigned int num_tx_queues)
2490{
2491 u32 hash;
2492 u16 qoffset = 0;
2493 u16 qcount = num_tx_queues;
2494
2495 if (skb_rx_queue_recorded(skb)) {
2496 hash = skb_get_rx_queue(skb);
2497 while (unlikely(hash >= num_tx_queues))
2498 hash -= num_tx_queues;
2499 return hash;
2500 }
2501
2502 if (dev->num_tc) {
2503 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2504 qoffset = dev->tc_to_txq[tc].offset;
2505 qcount = dev->tc_to_txq[tc].count;
2506 }
2507
2508 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2509}
2510EXPORT_SYMBOL(__skb_tx_hash);
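
/* Illustrative sketch: a driver's queue selection falling back to the
 * stack's hash-based spread over its active tx queues.
 */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}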
2511
Ben Hutchings36c92472012-01-17 07:57:56 +00002512static void skb_warn_bad_offload(const struct sk_buff *skb)
2513{
Wei Tang84d15ae2016-06-16 21:17:49 +08002514 static const netdev_features_t null_features;
Ben Hutchings36c92472012-01-17 07:57:56 +00002515 struct net_device *dev = skb->dev;
Bjørn Mork88ad4172015-11-16 19:16:40 +01002516 const char *name = "";
Ben Hutchings36c92472012-01-17 07:57:56 +00002517
Ben Greearc846ad92013-04-19 10:45:52 +00002518 if (!net_ratelimit())
2519 return;
2520
Bjørn Mork88ad4172015-11-16 19:16:40 +01002521 if (dev) {
2522 if (dev->dev.parent)
2523 name = dev_driver_string(dev->dev.parent);
2524 else
2525 name = netdev_name(dev);
2526 }
Ben Hutchings36c92472012-01-17 07:57:56 +00002527 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2528 "gso_type=%d ip_summed=%d\n",
Bjørn Mork88ad4172015-11-16 19:16:40 +01002529 name, dev ? &dev->features : &null_features,
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002530 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002531 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2532 skb_shinfo(skb)->gso_type, skb->ip_summed);
2533}
2534
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535/*
2536 * Invalidate hardware checksum when packet is to be mangled, and
2537 * complete checksum manually on outgoing path.
2538 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002539int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540{
Al Virod3bc23e2006-11-14 21:24:49 -08002541 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002542 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
Patrick McHardy84fa7932006-08-29 16:44:56 -07002544 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002545 goto out_set_summed;
2546
2547 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002548 skb_warn_bad_offload(skb);
2549 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 }
2551
Eric Dumazetcef401d2013-01-25 20:34:37 +00002552 /* Before computing a checksum, we should make sure no frag could
2553	 * be modified by an external entity; otherwise the checksum could be wrong.
2554 */
2555 if (skb_has_shared_frag(skb)) {
2556 ret = __skb_linearize(skb);
2557 if (ret)
2558 goto out;
2559 }
2560
Michał Mirosław55508d62010-12-14 15:24:08 +00002561 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002562 BUG_ON(offset >= skb_headlen(skb));
2563 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2564
2565 offset += skb->csum_offset;
2566 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2567
2568 if (skb_cloned(skb) &&
2569 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2571 if (ret)
2572 goto out;
2573 }
2574
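	/* Fold the 32-bit accumulated checksum down to 16 bits; if it folds
	 * to zero, store CSUM_MANGLED_0 instead, since an on-wire checksum
	 * of 0 means "no checksum" for UDP.
	 */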
Eric Dumazet4f2e4ad2016-10-29 11:02:36 -07002575 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
Herbert Xua430a432006-07-08 13:34:56 -07002576out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002578out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579 return ret;
2580}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002581EXPORT_SYMBOL(skb_checksum_help);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582
Vlad Yasevich53d64712014-03-27 17:26:18 -04002583__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002584{
2585 __be16 type = skb->protocol;
2586
Pravin B Shelar19acc322013-05-07 20:41:07 +00002587 /* Tunnel gso handlers can set protocol to ethernet. */
2588 if (type == htons(ETH_P_TEB)) {
2589 struct ethhdr *eth;
2590
2591 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2592 return 0;
2593
2594 eth = (struct ethhdr *)skb_mac_header(skb);
2595 type = eth->h_proto;
2596 }
2597
Toshiaki Makitad4bcef32015-01-29 20:37:07 +09002598 return __vlan_get_protocol(skb, type, depth);
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002599}
2600
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002601/**
2602 * skb_mac_gso_segment - mac layer segmentation handler.
2603 * @skb: buffer to segment
2604 * @features: features for the output path (see dev->features)
2605 */
2606struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2607 netdev_features_t features)
2608{
2609 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2610 struct packet_offload *ptype;
Vlad Yasevich53d64712014-03-27 17:26:18 -04002611 int vlan_depth = skb->mac_len;
2612 __be16 type = skb_network_protocol(skb, &vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002613
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002614 if (unlikely(!type))
2615 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002616
Vlad Yasevich53d64712014-03-27 17:26:18 -04002617 __skb_pull(skb, vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002618
2619 rcu_read_lock();
2620 list_for_each_entry_rcu(ptype, &offload_base, list) {
2621 if (ptype->type == type && ptype->callbacks.gso_segment) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002622 segs = ptype->callbacks.gso_segment(skb, features);
2623 break;
2624 }
2625 }
2626 rcu_read_unlock();
2627
2628 __skb_push(skb, skb->data - skb_mac_header(skb));
2629
2630 return segs;
2631}
2632EXPORT_SYMBOL(skb_mac_gso_segment);
2633
2634
Cong Wang12b00042013-02-05 16:36:38 +00002635/* openvswitch calls this on the rx path, so we need a different check.
2636 */
2637static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2638{
2639 if (tx_path)
Eric Dumazet6e7bc472017-02-03 14:29:42 -08002640 return skb->ip_summed != CHECKSUM_PARTIAL &&
2641 skb->ip_summed != CHECKSUM_NONE;
2642
2643 return skb->ip_summed == CHECKSUM_NONE;
Cong Wang12b00042013-02-05 16:36:38 +00002644}
2645
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002646/**
Cong Wang12b00042013-02-05 16:36:38 +00002647 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002648 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002649 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002650 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002651 *
2652 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002653 *
2654 * It may return NULL if the skb requires no segmentation. This is
2655 * only possible when GSO is used for verifying header integrity.
Konstantin Khlebnikov9207f9d2016-01-08 15:21:46 +03002656 *
2657 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002658 */
Cong Wang12b00042013-02-05 16:36:38 +00002659struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2660 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002661{
Eric Dumazetb2504a52017-01-31 10:20:32 -08002662 struct sk_buff *segs;
2663
Cong Wang12b00042013-02-05 16:36:38 +00002664 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002665 int err;
2666
Eric Dumazetb2504a52017-01-31 10:20:32 -08002667 /* We're going to init ->check field in TCP or UDP header */
françois romieua40e0a62014-07-15 23:55:35 +02002668 err = skb_cow_head(skb, 0);
2669 if (err < 0)
Herbert Xua430a432006-07-08 13:34:56 -07002670 return ERR_PTR(err);
2671 }
2672
Alexander Duyck802ab552016-04-10 21:45:03 -04002673 /* Only report GSO partial support if it will enable us to
2674 * support segmentation on this frame without needing additional
2675 * work.
2676 */
2677 if (features & NETIF_F_GSO_PARTIAL) {
2678 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
2679 struct net_device *dev = skb->dev;
2680
2681 partial_features |= dev->features & dev->gso_partial_features;
2682 if (!skb_gso_ok(skb, features | partial_features))
2683 features &= ~NETIF_F_GSO_PARTIAL;
2684 }
2685
Konstantin Khlebnikov9207f9d2016-01-08 15:21:46 +03002686 BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2687 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2688
Pravin B Shelar68c33162013-02-14 14:02:41 +00002689 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Eric Dumazet3347c962013-10-19 11:42:56 -07002690 SKB_GSO_CB(skb)->encap_level = 0;
2691
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002692 skb_reset_mac_header(skb);
2693 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002694
Eric Dumazetb2504a52017-01-31 10:20:32 -08002695 segs = skb_mac_gso_segment(skb, features);
2696
2697 if (unlikely(skb_needs_check(skb, tx_path)))
2698 skb_warn_bad_offload(skb);
2699
2700 return segs;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002701}
Cong Wang12b00042013-02-05 16:36:38 +00002702EXPORT_SYMBOL(__skb_gso_segment);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002703
Herbert Xufb286bb2005-11-10 13:01:24 -08002704/* Take action when hardware reception checksum errors are detected. */
2705#ifdef CONFIG_BUG
2706void netdev_rx_csum_fault(struct net_device *dev)
2707{
2708 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002709 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002710 dump_stack();
2711 }
2712}
2713EXPORT_SYMBOL(netdev_rx_csum_fault);
2714#endif
2715
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716/* Actually, we should eliminate this check as soon as we know that:
2717 * 1. IOMMU is present and allows mapping all the memory.
2718 * 2. No high memory really exists on this machine.
2719 */
2720
Florian Westphalc1e756b2014-05-05 15:00:44 +02002721static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002723#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002725 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002726 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2727 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2728 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002729 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002730 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002731 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002733 if (PCI_DMA_BUS_IS_PHYS) {
2734 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735
Eric Dumazet9092c652010-04-02 13:34:49 -07002736 if (!pdev)
2737 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002738 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002739 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2740 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002741 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2742 return 1;
2743 }
2744 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002745#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 return 0;
2747}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748
Simon Horman3b392dd2014-06-04 08:53:17 +09002749/* For an MPLS offload request, verify we are testing hardware MPLS
2750 * features instead of the standard features for the netdev.
2751 */
Pravin B Shelard0edc7b2014-12-23 16:20:11 -08002752#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
Simon Horman3b392dd2014-06-04 08:53:17 +09002753static netdev_features_t net_mpls_features(struct sk_buff *skb,
2754 netdev_features_t features,
2755 __be16 type)
2756{
Simon Horman25cd9ba2014-10-06 05:05:13 -07002757 if (eth_p_mpls(type))
Simon Horman3b392dd2014-06-04 08:53:17 +09002758 features &= skb->dev->mpls_features;
2759
2760 return features;
2761}
2762#else
2763static netdev_features_t net_mpls_features(struct sk_buff *skb,
2764 netdev_features_t features,
2765 __be16 type)
2766{
2767 return features;
2768}
2769#endif
2770
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002771static netdev_features_t harmonize_features(struct sk_buff *skb,
Florian Westphalc1e756b2014-05-05 15:00:44 +02002772 netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002773{
Vlad Yasevich53d64712014-03-27 17:26:18 -04002774 int tmp;
Simon Horman3b392dd2014-06-04 08:53:17 +09002775 __be16 type;
2776
2777 type = skb_network_protocol(skb, &tmp);
2778 features = net_mpls_features(skb, features, type);
Vlad Yasevich53d64712014-03-27 17:26:18 -04002779
Ed Cashinc0d680e2012-09-19 15:49:00 +00002780 if (skb->ip_summed != CHECKSUM_NONE &&
Simon Horman3b392dd2014-06-04 08:53:17 +09002781 !can_checksum_protocol(features, type)) {
Alexander Duyck996e8022016-05-02 09:25:10 -07002782 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Jesse Grossf01a5232011-01-09 06:23:31 +00002783 }
Eric Dumazet7be2c822017-01-18 12:12:17 -08002784 if (illegal_highdma(skb->dev, skb))
2785 features &= ~NETIF_F_SG;
Jesse Grossf01a5232011-01-09 06:23:31 +00002786
2787 return features;
2788}
2789
Toshiaki Makitae38f3022015-03-27 14:31:13 +09002790netdev_features_t passthru_features_check(struct sk_buff *skb,
2791 struct net_device *dev,
2792 netdev_features_t features)
2793{
2794 return features;
2795}
2796EXPORT_SYMBOL(passthru_features_check);
2797
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002798static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2799 struct net_device *dev,
2800 netdev_features_t features)
2801{
2802 return vlan_features_check(skb, features);
2803}
2804
Alexander Duyckcbc53e02016-04-10 21:44:51 -04002805static netdev_features_t gso_features_check(const struct sk_buff *skb,
2806 struct net_device *dev,
2807 netdev_features_t features)
2808{
2809 u16 gso_segs = skb_shinfo(skb)->gso_segs;
2810
2811 if (gso_segs > dev->gso_max_segs)
2812 return features & ~NETIF_F_GSO_MASK;
2813
Alexander Duyck802ab552016-04-10 21:45:03 -04002814 /* Support for GSO partial features requires software
2815 * intervention before we can actually process the packets,
2816 * so we need to strip support for any partial features now
2817 * and pull them back in after we have partially
2818 * segmented the frame.
2819 */
2820 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
2821 features &= ~dev->gso_partial_features;
2822
2823 /* Make sure to clear the IPv4 ID mangling feature if the
2824 * IPv4 header has the potential to be fragmented.
Alexander Duyckcbc53e02016-04-10 21:44:51 -04002825 */
2826 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
2827 struct iphdr *iph = skb->encapsulation ?
2828 inner_ip_hdr(skb) : ip_hdr(skb);
2829
2830 if (!(iph->frag_off & htons(IP_DF)))
2831 features &= ~NETIF_F_TSO_MANGLEID;
2832 }
2833
2834 return features;
2835}
2836
Florian Westphalc1e756b2014-05-05 15:00:44 +02002837netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002838{
Jesse Gross5f352272014-12-23 22:37:26 -08002839 struct net_device *dev = skb->dev;
Eric Dumazetfcbeb972014-10-05 10:11:27 -07002840 netdev_features_t features = dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002841
Alexander Duyckcbc53e02016-04-10 21:44:51 -04002842 if (skb_is_gso(skb))
2843 features = gso_features_check(skb, dev, features);
Ben Hutchings30b678d2012-07-30 15:57:00 +00002844
Jesse Gross5f352272014-12-23 22:37:26 -08002845	/* For an encapsulation offload request, verify that we are testing
2846	 * hardware encapsulation features instead of the standard
2847	 * features for the netdev.
2848 */
2849 if (skb->encapsulation)
2850 features &= dev->hw_enc_features;
2851
Toshiaki Makitaf5a7fb82015-03-27 14:31:11 +09002852 if (skb_vlan_tagged(skb))
2853 features = netdev_intersect_features(features,
2854 dev->vlan_features |
2855 NETIF_F_HW_VLAN_CTAG_TX |
2856 NETIF_F_HW_VLAN_STAG_TX);
Jesse Gross58e998c2010-10-29 12:14:55 +00002857
Jesse Gross5f352272014-12-23 22:37:26 -08002858 if (dev->netdev_ops->ndo_features_check)
2859 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2860 features);
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002861 else
2862 features &= dflt_features_check(skb, dev, features);
Jesse Gross5f352272014-12-23 22:37:26 -08002863
Florian Westphalc1e756b2014-05-05 15:00:44 +02002864 return harmonize_features(skb, features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002865}
Florian Westphalc1e756b2014-05-05 15:00:44 +02002866EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002867
David S. Miller2ea25512014-08-29 21:10:01 -07002868static int xmit_one(struct sk_buff *skb, struct net_device *dev,
David S. Miller95f6b3d2014-08-29 21:57:30 -07002869 struct netdev_queue *txq, bool more)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002870{
David S. Miller2ea25512014-08-29 21:10:01 -07002871 unsigned int len;
2872 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08002873
Salam Noureddine7866a622015-01-27 11:35:48 -08002874 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
David S. Miller2ea25512014-08-29 21:10:01 -07002875 dev_queue_xmit_nit(skb, dev);
Jesse Grossfc741212011-01-09 06:23:32 +00002876
David S. Miller2ea25512014-08-29 21:10:01 -07002877 len = skb->len;
2878 trace_net_dev_start_xmit(skb, dev);
David S. Miller95f6b3d2014-08-29 21:57:30 -07002879 rc = netdev_start_xmit(skb, dev, txq, more);
David S. Miller2ea25512014-08-29 21:10:01 -07002880 trace_net_dev_xmit(skb, rc, dev, len);
Eric Dumazetadf30902009-06-02 05:19:30 +00002881
Patrick McHardy572a9d72009-11-10 06:14:14 +00002882 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002883}
David S. Miller2ea25512014-08-29 21:10:01 -07002884
David S. Miller8dcda222014-09-01 15:06:40 -07002885struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2886 struct netdev_queue *txq, int *ret)
David S. Miller7f2e8702014-08-29 21:19:14 -07002887{
2888 struct sk_buff *skb = first;
2889 int rc = NETDEV_TX_OK;
2890
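	/* Walk the (possibly GSO-segmented) list, unlinking each skb before
	 * handing it to the driver; stop early if the driver does not accept
	 * an skb or the queue is stopped.
	 */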
2891 while (skb) {
2892 struct sk_buff *next = skb->next;
2893
2894 skb->next = NULL;
David S. Miller95f6b3d2014-08-29 21:57:30 -07002895 rc = xmit_one(skb, dev, txq, next != NULL);
David S. Miller7f2e8702014-08-29 21:19:14 -07002896 if (unlikely(!dev_xmit_complete(rc))) {
2897 skb->next = next;
2898 goto out;
2899 }
2900
2901 skb = next;
2902 if (netif_xmit_stopped(txq) && skb) {
2903 rc = NETDEV_TX_BUSY;
2904 break;
2905 }
2906 }
2907
2908out:
2909 *ret = rc;
2910 return skb;
2911}
2912
Eric Dumazet1ff0dc92014-10-06 11:26:27 -07002913static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2914 netdev_features_t features)
David S. Millereae3f882014-08-30 15:17:13 -07002915{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002916 if (skb_vlan_tag_present(skb) &&
Jiri Pirko59682502014-11-19 14:04:59 +01002917 !vlan_hw_offload_capable(features, skb->vlan_proto))
2918 skb = __vlan_hwaccel_push_inside(skb);
David S. Millereae3f882014-08-30 15:17:13 -07002919 return skb;
2920}
2921
Eric Dumazet55a93b32014-10-03 15:31:07 -07002922static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
David S. Millereae3f882014-08-30 15:17:13 -07002923{
2924 netdev_features_t features;
2925
David S. Millereae3f882014-08-30 15:17:13 -07002926 features = netif_skb_features(skb);
2927 skb = validate_xmit_vlan(skb, features);
2928 if (unlikely(!skb))
2929 goto out_null;
2930
Johannes Berg8b86a612015-04-17 15:45:04 +02002931 if (netif_needs_gso(skb, features)) {
David S. Millerce937182014-08-30 19:22:20 -07002932 struct sk_buff *segs;
2933
2934 segs = skb_gso_segment(skb, features);
Jason Wangcecda692014-09-19 16:04:38 +08002935 if (IS_ERR(segs)) {
Jason Wangaf6dabc2014-12-19 11:09:13 +08002936 goto out_kfree_skb;
Jason Wangcecda692014-09-19 16:04:38 +08002937 } else if (segs) {
2938 consume_skb(skb);
2939 skb = segs;
2940 }
David S. Millereae3f882014-08-30 15:17:13 -07002941 } else {
2942 if (skb_needs_linearize(skb, features) &&
2943 __skb_linearize(skb))
2944 goto out_kfree_skb;
2945
2946 /* If packet is not checksummed and device does not
2947 * support checksumming for this protocol, complete
2948 * checksumming here.
2949 */
2950 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2951 if (skb->encapsulation)
2952 skb_set_inner_transport_header(skb,
2953 skb_checksum_start_offset(skb));
2954 else
2955 skb_set_transport_header(skb,
2956 skb_checksum_start_offset(skb));
Tom Herberta1882222015-12-14 11:19:43 -08002957 if (!(features & NETIF_F_CSUM_MASK) &&
David S. Millereae3f882014-08-30 15:17:13 -07002958 skb_checksum_help(skb))
2959 goto out_kfree_skb;
2960 }
2961 }
2962
2963 return skb;
2964
2965out_kfree_skb:
2966 kfree_skb(skb);
2967out_null:
Eric Dumazetd21fd632016-04-12 21:50:07 -07002968 atomic_long_inc(&dev->tx_dropped);
David S. Millereae3f882014-08-30 15:17:13 -07002969 return NULL;
2970}
2971
Eric Dumazet55a93b32014-10-03 15:31:07 -07002972struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2973{
2974 struct sk_buff *next, *head = NULL, *tail;
2975
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07002976 for (; skb != NULL; skb = next) {
Eric Dumazet55a93b32014-10-03 15:31:07 -07002977 next = skb->next;
2978 skb->next = NULL;
Eric Dumazet55a93b32014-10-03 15:31:07 -07002979
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07002980		/* in case skb won't be segmented, point to itself */
2981 skb->prev = skb;
2982
2983 skb = validate_xmit_skb(skb, dev);
2984 if (!skb)
2985 continue;
2986
2987 if (!head)
2988 head = skb;
2989 else
2990 tail->next = skb;
2991 /* If skb was segmented, skb->prev points to
2992 * the last segment. If not, it still contains skb.
2993 */
2994 tail = skb->prev;
Eric Dumazet55a93b32014-10-03 15:31:07 -07002995 }
2996 return head;
2997}
Willem de Bruijn104ba782016-10-26 11:23:07 -04002998EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002999
Eric Dumazet1def9232013-01-10 12:36:42 +00003000static void qdisc_pkt_len_init(struct sk_buff *skb)
3001{
3002 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3003
3004 qdisc_skb_cb(skb)->pkt_len = skb->len;
3005
3006 /* To get more precise estimation of bytes sent on wire,
3007 * we add to pkt_len the headers size of all segments
3008 */
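	/* Hypothetical example: a TCP GSO skb with gso_segs = 10 and
	 * hdr_len = 66 (14 Ethernet + 20 IPv4 + 32 TCP w/ timestamps) gets
	 * pkt_len increased by (10 - 1) * 66 = 594 bytes of repeated headers.
	 */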
3009 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08003010 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00003011 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00003012
Eric Dumazet757b8b12013-01-15 21:14:21 -08003013 /* mac layer + network layer */
3014 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3015
3016 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00003017 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3018 hdr_len += tcp_hdrlen(skb);
3019 else
3020 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00003021
3022 if (shinfo->gso_type & SKB_GSO_DODGY)
3023 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3024 shinfo->gso_size);
3025
3026 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00003027 }
3028}
3029
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003030static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3031 struct net_device *dev,
3032 struct netdev_queue *txq)
3033{
3034 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazet520ac302016-06-21 23:16:49 -07003035 struct sk_buff *to_free = NULL;
Eric Dumazeta2da5702011-01-20 03:48:19 +00003036 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003037 int rc;
3038
Eric Dumazeta2da5702011-01-20 03:48:19 +00003039 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003040 /*
3041 * Heuristic to force contended enqueues to serialize on a
3042	 * separate lock before trying to get the qdisc main lock.
Eric Dumazetf9eb8ae2016-06-06 09:37:15 -07003043	 * This permits the qdisc->running owner to take the lock more
Ying Xue9bf2b8c2014-06-26 15:56:31 +08003044	 * often and dequeue packets faster.
Eric Dumazet79640a42010-06-02 05:09:29 -07003045 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00003046 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003047 if (unlikely(contended))
3048 spin_lock(&q->busylock);
3049
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003050 spin_lock(root_lock);
3051 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
Eric Dumazet520ac302016-06-21 23:16:49 -07003052 __qdisc_drop(skb, &to_free);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003053 rc = NET_XMIT_DROP;
3054 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07003055 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003056 /*
3057 * This is a work-conserving queue; there are no old skbs
3058 * waiting to be sent out; and the qdisc is not running -
3059 * xmit the skb directly.
3060 */
Eric Dumazetbfe0d022011-01-09 08:30:54 +00003061
Eric Dumazetbfe0d022011-01-09 08:30:54 +00003062 qdisc_bstats_update(q, skb);
3063
Eric Dumazet55a93b32014-10-03 15:31:07 -07003064 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
Eric Dumazet79640a42010-06-02 05:09:29 -07003065 if (unlikely(contended)) {
3066 spin_unlock(&q->busylock);
3067 contended = false;
3068 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003069 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003070 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07003071 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003072
3073 rc = NET_XMIT_SUCCESS;
3074 } else {
Eric Dumazet520ac302016-06-21 23:16:49 -07003075 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07003076 if (qdisc_run_begin(q)) {
3077 if (unlikely(contended)) {
3078 spin_unlock(&q->busylock);
3079 contended = false;
3080 }
3081 __qdisc_run(q);
3082 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003083 }
3084 spin_unlock(root_lock);
Eric Dumazet520ac302016-06-21 23:16:49 -07003085 if (unlikely(to_free))
3086 kfree_skb_list(to_free);
Eric Dumazet79640a42010-06-02 05:09:29 -07003087 if (unlikely(contended))
3088 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003089 return rc;
3090}
3091
Daniel Borkmann86f85152013-12-29 17:27:11 +01003092#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Neil Horman5bc14212011-11-22 05:10:51 +00003093static void skb_update_prio(struct sk_buff *skb)
3094{
Igor Maravic6977a792011-11-25 07:44:54 +00003095 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00003096
Eric Dumazet91c68ce2012-07-08 21:45:10 +00003097 if (!skb->priority && skb->sk && map) {
Tejun Heo2a56a1f2015-12-07 17:38:52 -05003098 unsigned int prioidx =
3099 sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
Eric Dumazet91c68ce2012-07-08 21:45:10 +00003100
3101 if (prioidx < map->priomap_len)
3102 skb->priority = map->priomap[prioidx];
3103 }
Neil Horman5bc14212011-11-22 05:10:51 +00003104}
3105#else
3106#define skb_update_prio(skb)
3107#endif
3108
hannes@stressinduktion.orgf60e5992015-04-01 17:07:44 +02003109DEFINE_PER_CPU(int, xmit_recursion);
3110EXPORT_SYMBOL(xmit_recursion);
3111
Dave Jonesd29f7492008-07-22 14:09:06 -07003112/**
Michel Machado95603e22012-06-12 10:16:35 +00003113 * dev_loopback_xmit - loop back @skb
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05003114 * @net: network namespace this loopback is happening in
3115 * @sk: the sock; present so this function can be used as a netfilter okfn
Michel Machado95603e22012-06-12 10:16:35 +00003116 * @skb: buffer to transmit
3117 */
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05003118int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
Michel Machado95603e22012-06-12 10:16:35 +00003119{
3120 skb_reset_mac_header(skb);
3121 __skb_pull(skb, skb_network_offset(skb));
3122 skb->pkt_type = PACKET_LOOPBACK;
3123 skb->ip_summed = CHECKSUM_UNNECESSARY;
3124 WARN_ON(!skb_dst(skb));
3125 skb_dst_force(skb);
3126 netif_rx_ni(skb);
3127 return 0;
3128}
3129EXPORT_SYMBOL(dev_loopback_xmit);
3130
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003131#ifdef CONFIG_NET_EGRESS
3132static struct sk_buff *
3133sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3134{
3135 struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
3136 struct tcf_result cl_res;
3137
3138 if (!cl)
3139 return skb;
3140
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05003141 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003142 qdisc_bstats_cpu_update(cl->q, skb);
3143
3144 switch (tc_classify(skb, cl, &cl_res, false)) {
3145 case TC_ACT_OK:
3146 case TC_ACT_RECLASSIFY:
3147 skb->tc_index = TC_H_MIN(cl_res.classid);
3148 break;
3149 case TC_ACT_SHOT:
3150 qdisc_qstats_cpu_drop(cl->q);
3151 *ret = NET_XMIT_DROP;
Daniel Borkmann7e2c3ae2016-05-15 23:28:29 +02003152 kfree_skb(skb);
3153 return NULL;
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003154 case TC_ACT_STOLEN:
3155 case TC_ACT_QUEUED:
3156 *ret = NET_XMIT_SUCCESS;
Daniel Borkmann7e2c3ae2016-05-15 23:28:29 +02003157 consume_skb(skb);
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003158 return NULL;
3159 case TC_ACT_REDIRECT:
3160 /* No need to push/pop skb's mac_header here on egress! */
3161 skb_do_redirect(skb);
3162 *ret = NET_XMIT_SUCCESS;
3163 return NULL;
3164 default:
3165 break;
3166 }
3167
3168 return skb;
3169}
3170#endif /* CONFIG_NET_EGRESS */
3171
Jiri Pirko638b2a62015-05-12 14:56:13 +02003172static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3173{
3174#ifdef CONFIG_XPS
3175 struct xps_dev_maps *dev_maps;
3176 struct xps_map *map;
3177 int queue_index = -1;
3178
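	/* XPS: look up the queue set configured for the CPU that generated
	 * this skb (sender_cpu - 1), folding in the traffic-class offset
	 * when the device is partitioned via num_tc.
	 */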
3179 rcu_read_lock();
3180 dev_maps = rcu_dereference(dev->xps_maps);
3181 if (dev_maps) {
Alexander Duyck184c4492016-10-28 11:50:13 -04003182 unsigned int tci = skb->sender_cpu - 1;
3183
3184 if (dev->num_tc) {
3185 tci *= dev->num_tc;
3186 tci += netdev_get_prio_tc_map(dev, skb->priority);
3187 }
3188
3189 map = rcu_dereference(dev_maps->cpu_map[tci]);
Jiri Pirko638b2a62015-05-12 14:56:13 +02003190 if (map) {
3191 if (map->len == 1)
3192 queue_index = map->queues[0];
3193 else
3194 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
3195 map->len)];
3196 if (unlikely(queue_index >= dev->real_num_tx_queues))
3197 queue_index = -1;
3198 }
3199 }
3200 rcu_read_unlock();
3201
3202 return queue_index;
3203#else
3204 return -1;
3205#endif
3206}
3207
3208static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3209{
3210 struct sock *sk = skb->sk;
3211 int queue_index = sk_tx_queue_get(sk);
3212
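	/* Reuse the socket's cached queue unless it is missing, stale after
	 * a queue-count change, or the flow allows re-steering (ooo_okay).
	 */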
3213 if (queue_index < 0 || skb->ooo_okay ||
3214 queue_index >= dev->real_num_tx_queues) {
3215 int new_index = get_xps_queue(dev, skb);
3216 if (new_index < 0)
3217 new_index = skb_tx_hash(dev, skb);
3218
3219 if (queue_index != new_index && sk &&
Eric Dumazet004a5d02015-10-04 21:08:10 -07003220 sk_fullsock(sk) &&
Jiri Pirko638b2a62015-05-12 14:56:13 +02003221 rcu_access_pointer(sk->sk_dst_cache))
3222 sk_tx_queue_set(sk, new_index);
3223
3224 queue_index = new_index;
3225 }
3226
3227 return queue_index;
3228}
3229
3230struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3231 struct sk_buff *skb,
3232 void *accel_priv)
3233{
3234 int queue_index = 0;
3235
3236#ifdef CONFIG_XPS
Eric Dumazet52bd2d62015-11-18 06:30:50 -08003237 u32 sender_cpu = skb->sender_cpu - 1;
3238
3239 if (sender_cpu >= (u32)NR_CPUS)
Jiri Pirko638b2a62015-05-12 14:56:13 +02003240 skb->sender_cpu = raw_smp_processor_id() + 1;
3241#endif
3242
3243 if (dev->real_num_tx_queues != 1) {
3244 const struct net_device_ops *ops = dev->netdev_ops;
3245 if (ops->ndo_select_queue)
3246 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3247 __netdev_pick_tx);
3248 else
3249 queue_index = __netdev_pick_tx(dev, skb);
3250
3251 if (!accel_priv)
3252 queue_index = netdev_cap_txqueue(dev, queue_index);
3253 }
3254
3255 skb_set_queue_mapping(skb, queue_index);
3256 return netdev_get_tx_queue(dev, queue_index);
3257}
3258
Michel Machado95603e22012-06-12 10:16:35 +00003259/**
Jason Wang9d08dd32014-01-20 11:25:13 +08003260 * __dev_queue_xmit - transmit a buffer
Dave Jonesd29f7492008-07-22 14:09:06 -07003261 * @skb: buffer to transmit
Jason Wang9d08dd32014-01-20 11:25:13 +08003262 * @accel_priv: private data used for L2 forwarding offload
Dave Jonesd29f7492008-07-22 14:09:06 -07003263 *
3264 * Queue a buffer for transmission to a network device. The caller must
3265 * have set the device and priority and built the buffer before calling
3266 * this function. The function can be called from an interrupt.
3267 *
3268 * A negative errno code is returned on a failure. A success does not
3269 * guarantee the frame will be transmitted as it may be dropped due
3270 * to congestion or traffic shaping.
3271 *
3272 * -----------------------------------------------------------------------------------
3273 * I notice this method can also return errors from the queue disciplines,
3274 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3275 * be positive.
3276 *
3277 * Regardless of the return value, the skb is consumed, so it is currently
3278 * difficult to retry a send to this method. (You can bump the ref count
3279 * before sending to hold a reference for retry if you are careful.)
3280 *
3281 * When calling this method, interrupts MUST be enabled. This is because
3282 * the BH enable code must have IRQs enabled so that it will not deadlock.
3283 * --BLG
3284 */
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05303285static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286{
3287 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07003288 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289 struct Qdisc *q;
3290 int rc = -ENOMEM;
3291
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00003292 skb_reset_mac_header(skb);
3293
Willem de Bruijne7fd2882014-08-04 22:11:48 -04003294 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3295 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3296
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003297 /* Disable soft irqs for various locks below. Also
3298 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003300 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003301
Neil Horman5bc14212011-11-22 05:10:51 +00003302 skb_update_prio(skb);
3303
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003304 qdisc_pkt_len_init(skb);
3305#ifdef CONFIG_NET_CLS_ACT
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05003306 skb->tc_at_ingress = 0;
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003307# ifdef CONFIG_NET_EGRESS
3308 if (static_key_false(&egress_needed)) {
3309 skb = sch_handle_egress(skb, &rc, dev);
3310 if (!skb)
3311 goto out;
3312 }
3313# endif
3314#endif
Eric Dumazet02875872014-10-05 18:38:35 -07003315 /* If device/qdisc don't need skb->dst, release it right now while
3316	 * it's hot in this CPU's cache.
3317 */
3318 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3319 skb_dst_drop(skb);
3320 else
3321 skb_dst_force(skb);
3322
Jason Wangf663dd92014-01-10 16:18:26 +08003323 txq = netdev_pick_tx(dev, skb, accel_priv);
Paul E. McKenneya898def2010-02-22 17:04:49 -08003324 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07003325
Koki Sanagicf66ba52010-08-23 18:45:02 +09003326 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003327 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003328 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07003329 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 }
3331
3332	/* The device has no queue. Common case for software devices:
3333	   loopback, all sorts of tunnels...
3334
Herbert Xu932ff272006-06-09 12:20:56 -07003335	   Really, it is unlikely that netif_tx_lock protection is necessary
3336	   here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337	   counters.)
3338	   However, it is possible that they rely on the protection
3339	   made by us here.
3340
3341	   Check this and shoot the lock. It is not prone to deadlocks.
3342	   Or shoot the noqueue qdisc; that is even simpler 8)
3343 */
3344 if (dev->flags & IFF_UP) {
3345 int cpu = smp_processor_id(); /* ok because BHs are off */
3346
David S. Millerc773e842008-07-08 23:13:53 -07003347 if (txq->xmit_lock_owner != cpu) {
Daniel Borkmanna70b5062016-06-10 21:19:06 +02003348 if (unlikely(__this_cpu_read(xmit_recursion) >
3349 XMIT_RECURSION_LIMIT))
Eric Dumazet745e20f2010-09-29 13:23:09 -07003350 goto recursion_alert;
3351
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003352 skb = validate_xmit_skb(skb, dev);
3353 if (!skb)
Eric Dumazetd21fd632016-04-12 21:50:07 -07003354 goto out;
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003355
David S. Millerc773e842008-07-08 23:13:53 -07003356 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357
Tom Herbert734664982011-11-28 16:32:44 +00003358 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07003359 __this_cpu_inc(xmit_recursion);
David S. Millerce937182014-08-30 19:22:20 -07003360 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
Eric Dumazet745e20f2010-09-29 13:23:09 -07003361 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00003362 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07003363 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364 goto out;
3365 }
3366 }
David S. Millerc773e842008-07-08 23:13:53 -07003367 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00003368 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3369 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 } else {
3371 /* Recursion is detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07003372 * unfortunately
3373 */
3374recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00003375 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3376 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377 }
3378 }
3379
3380 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07003381 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382
Eric Dumazet015f0682014-03-27 08:45:56 -07003383 atomic_long_inc(&dev->tx_dropped);
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003384 kfree_skb_list(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 return rc;
3386out:
Herbert Xud4828d82006-06-22 02:28:18 -07003387 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388 return rc;
3389}
Jason Wangf663dd92014-01-10 16:18:26 +08003390
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05003391int dev_queue_xmit(struct sk_buff *skb)
Jason Wangf663dd92014-01-10 16:18:26 +08003392{
3393 return __dev_queue_xmit(skb, NULL);
3394}
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05003395EXPORT_SYMBOL(dev_queue_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396
Jason Wangf663dd92014-01-10 16:18:26 +08003397int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3398{
3399 return __dev_queue_xmit(skb, accel_priv);
3400}
3401EXPORT_SYMBOL(dev_queue_xmit_accel);
3402
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403
3404/*=======================================================================
3405 Receiver routines
3406 =======================================================================*/
3407
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003408int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00003409EXPORT_SYMBOL(netdev_max_backlog);
3410
Eric Dumazet3b098e22010-05-15 23:57:10 -07003411int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003412int netdev_budget __read_mostly = 300;
Matthias Tafelmeier3d48b532016-12-29 21:37:21 +01003413int weight_p __read_mostly = 64; /* old backlog weight */
3414int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
3415int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
3416int dev_rx_weight __read_mostly = 64;
3417int dev_tx_weight __read_mostly = 64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003419/* Called with irq disabled */
3420static inline void ____napi_schedule(struct softnet_data *sd,
3421 struct napi_struct *napi)
3422{
3423 list_add_tail(&napi->poll_list, &sd->poll_list);
3424 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3425}
3426
Eric Dumazetdf334542010-03-24 19:13:54 +00003427#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07003428
3429/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003430struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07003431EXPORT_SYMBOL(rps_sock_flow_table);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003432u32 rps_cpu_mask __read_mostly;
3433EXPORT_SYMBOL(rps_cpu_mask);
Tom Herbertfec5e652010-04-16 16:01:27 -07003434
Ingo Molnarc5905af2012-02-24 08:31:31 +01003435struct static_key rps_needed __read_mostly;
Jason Wang3df97ba2016-04-25 23:13:42 -04003436EXPORT_SYMBOL(rps_needed);
Eric Dumazet13bfff22016-12-07 08:29:10 -08003437struct static_key rfs_needed __read_mostly;
3438EXPORT_SYMBOL(rfs_needed);
Eric Dumazetadc93002011-11-17 03:13:26 +00003439
Ben Hutchingsc4454772011-01-19 11:03:53 +00003440static struct rps_dev_flow *
3441set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3442 struct rps_dev_flow *rflow, u16 next_cpu)
3443{
Eric Dumazeta31196b2015-04-25 09:35:24 -07003444 if (next_cpu < nr_cpu_ids) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00003445#ifdef CONFIG_RFS_ACCEL
3446 struct netdev_rx_queue *rxqueue;
3447 struct rps_dev_flow_table *flow_table;
3448 struct rps_dev_flow *old_rflow;
3449 u32 flow_id;
3450 u16 rxq_index;
3451 int rc;
3452
3453 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00003454 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3455 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00003456 goto out;
3457 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3458 if (rxq_index == skb_get_rx_queue(skb))
3459 goto out;
3460
3461 rxqueue = dev->_rx + rxq_index;
3462 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3463 if (!flow_table)
3464 goto out;
Tom Herbert61b905d2014-03-24 15:34:47 -07003465 flow_id = skb_get_hash(skb) & flow_table->mask;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003466 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3467 rxq_index, flow_id);
3468 if (rc < 0)
3469 goto out;
3470 old_rflow = rflow;
3471 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00003472 rflow->filter = rc;
3473 if (old_rflow->filter == rflow->filter)
3474 old_rflow->filter = RPS_NO_FILTER;
3475 out:
3476#endif
3477 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00003478 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003479 }
3480
Ben Hutchings09994d12011-10-03 04:42:46 +00003481 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003482 return rflow;
3483}
3484
Tom Herbert0a9627f2010-03-16 08:03:29 +00003485/*
3486 * get_rps_cpu is called from netif_receive_skb and returns the target
3487 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003488 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00003489 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003490static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3491 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003492{
Eric Dumazet567e4b72015-02-06 12:59:01 -08003493 const struct rps_sock_flow_table *sock_flow_table;
3494 struct netdev_rx_queue *rxqueue = dev->_rx;
Tom Herbertfec5e652010-04-16 16:01:27 -07003495 struct rps_dev_flow_table *flow_table;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003496 struct rps_map *map;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003497 int cpu = -1;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003498 u32 tcpu;
Tom Herbert61b905d2014-03-24 15:34:47 -07003499 u32 hash;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003500
Tom Herbert0a9627f2010-03-16 08:03:29 +00003501 if (skb_rx_queue_recorded(skb)) {
3502 u16 index = skb_get_rx_queue(skb);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003503
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003504 if (unlikely(index >= dev->real_num_rx_queues)) {
3505 WARN_ONCE(dev->real_num_rx_queues > 1,
3506 "%s received packet on queue %u, but number "
3507 "of RX queues is %u\n",
3508 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003509 goto done;
3510 }
Eric Dumazet567e4b72015-02-06 12:59:01 -08003511 rxqueue += index;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003512 }
3513
Eric Dumazet567e4b72015-02-06 12:59:01 -08003514 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3515
3516 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3517 map = rcu_dereference(rxqueue->rps_map);
3518 if (!flow_table && !map)
3519 goto done;
3520
Changli Gao2d47b452010-08-17 19:00:56 +00003521 skb_reset_network_header(skb);
Tom Herbert61b905d2014-03-24 15:34:47 -07003522 hash = skb_get_hash(skb);
3523 if (!hash)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003524 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003525
Tom Herbertfec5e652010-04-16 16:01:27 -07003526 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3527 if (flow_table && sock_flow_table) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003528 struct rps_dev_flow *rflow;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003529 u32 next_cpu;
3530 u32 ident;
Tom Herbertfec5e652010-04-16 16:01:27 -07003531
Eric Dumazet567e4b72015-02-06 12:59:01 -08003532 /* First check into global flow table if there is a match */
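		/* Each ents[] entry packs the flow hash into the upper bits
		 * and the desired CPU into the low rps_cpu_mask bits, so one
		 * comparison both validates the hash and yields the CPU.
		 */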
3533 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3534 if ((ident ^ hash) & ~rps_cpu_mask)
3535 goto try_rps;
3536
3537 next_cpu = ident & rps_cpu_mask;
3538
3539 /* OK, now we know there is a match,
3540 * we can look at the local (per receive queue) flow table
3541 */
Tom Herbert61b905d2014-03-24 15:34:47 -07003542 rflow = &flow_table->flows[hash & flow_table->mask];
Tom Herbertfec5e652010-04-16 16:01:27 -07003543 tcpu = rflow->cpu;
3544
Tom Herbertfec5e652010-04-16 16:01:27 -07003545 /*
3546 * If the desired CPU (where last recvmsg was done) is
3547 * different from current CPU (one in the rx-queue flow
3548 * table entry), switch if one of the following holds:
Eric Dumazeta31196b2015-04-25 09:35:24 -07003549 * - Current CPU is unset (>= nr_cpu_ids).
Tom Herbertfec5e652010-04-16 16:01:27 -07003550 * - Current CPU is offline.
3551 * - The current CPU's queue tail has advanced beyond the
3552 * last packet that was enqueued using this table entry.
3553 * This guarantees that all previous packets for the flow
3554 * have been dequeued, thus preserving in order delivery.
3555 */
3556 if (unlikely(tcpu != next_cpu) &&
Eric Dumazeta31196b2015-04-25 09:35:24 -07003557 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
Tom Herbertfec5e652010-04-16 16:01:27 -07003558 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003559 rflow->last_qtail)) >= 0)) {
3560 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003561 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003562 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003563
Eric Dumazeta31196b2015-04-25 09:35:24 -07003564 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003565 *rflowp = rflow;
3566 cpu = tcpu;
3567 goto done;
3568 }
3569 }
3570
Eric Dumazet567e4b72015-02-06 12:59:01 -08003571try_rps:
3572
Tom Herbert0a9627f2010-03-16 08:03:29 +00003573 if (map) {
Daniel Borkmann8fc54f62014-08-23 20:58:54 +02003574 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003575 if (cpu_online(tcpu)) {
3576 cpu = tcpu;
3577 goto done;
3578 }
3579 }
3580
3581done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003582 return cpu;
3583}
3584
Ben Hutchingsc4454772011-01-19 11:03:53 +00003585#ifdef CONFIG_RFS_ACCEL
3586
3587/**
3588 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3589 * @dev: Device on which the filter was set
3590 * @rxq_index: RX queue index
3591 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3592 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3593 *
3594 * Drivers that implement ndo_rx_flow_steer() should periodically call
3595 * this function for each installed filter and remove the filters for
3596 * which it returns %true.
3597 */
3598bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3599 u32 flow_id, u16 filter_id)
3600{
3601 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3602 struct rps_dev_flow_table *flow_table;
3603 struct rps_dev_flow *rflow;
3604 bool expire = true;
Eric Dumazeta31196b2015-04-25 09:35:24 -07003605 unsigned int cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003606
3607 rcu_read_lock();
3608 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3609 if (flow_table && flow_id <= flow_table->mask) {
3610 rflow = &flow_table->flows[flow_id];
3611 cpu = ACCESS_ONCE(rflow->cpu);
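		/* Keep the filter while the flow's CPU is valid and that
		 * CPU's backlog has advanced less than ~10 table sizes since
		 * this flow last enqueued, i.e. the flow still looks active.
		 */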
Eric Dumazeta31196b2015-04-25 09:35:24 -07003612 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
Ben Hutchingsc4454772011-01-19 11:03:53 +00003613 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3614 rflow->last_qtail) <
3615 (int)(10 * flow_table->mask)))
3616 expire = false;
3617 }
3618 rcu_read_unlock();
3619 return expire;
3620}
3621EXPORT_SYMBOL(rps_may_expire_flow);
3622
3623#endif /* CONFIG_RFS_ACCEL */
3624
Tom Herbert0a9627f2010-03-16 08:03:29 +00003625/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003626static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003627{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003628 struct softnet_data *sd = data;
3629
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003630 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003631 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003632}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003633
Tom Herbertfec5e652010-04-16 16:01:27 -07003634#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003635
3636/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003637 * Check if this softnet_data structure belongs to another CPU.
3638 * If yes, queue it to our IPI list and return 1
3639 * If no, return 0
3640 */
3641static int rps_ipi_queued(struct softnet_data *sd)
3642{
3643#ifdef CONFIG_RPS
Christoph Lameter903ceff2014-08-17 12:30:35 -05003644 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003645
3646 if (sd != mysd) {
3647 sd->rps_ipi_next = mysd->rps_ipi_list;
3648 mysd->rps_ipi_list = sd;
3649
3650 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3651 return 1;
3652 }
3653#endif /* CONFIG_RPS */
3654 return 0;
3655}
3656
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003657#ifdef CONFIG_NET_FLOW_LIMIT
3658int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3659#endif
3660
3661static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3662{
3663#ifdef CONFIG_NET_FLOW_LIMIT
3664 struct sd_flow_limit *fl;
3665 struct softnet_data *sd;
3666 unsigned int old_flow, new_flow;
3667
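	/* Flow limiting only engages once the backlog is at least half
	 * full; below that threshold every flow may enqueue freely.
	 */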
3668 if (qlen < (netdev_max_backlog >> 1))
3669 return false;
3670
Christoph Lameter903ceff2014-08-17 12:30:35 -05003671 sd = this_cpu_ptr(&softnet_data);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003672
3673 rcu_read_lock();
3674 fl = rcu_dereference(sd->flow_limit);
3675 if (fl) {
Tom Herbert3958afa1b2013-12-15 22:12:06 -08003676 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003677 old_flow = fl->history[fl->history_head];
3678 fl->history[fl->history_head] = new_flow;
3679
3680 fl->history_head++;
3681 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3682
3683 if (likely(fl->buckets[old_flow]))
3684 fl->buckets[old_flow]--;
3685
3686 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3687 fl->count++;
3688 rcu_read_unlock();
3689 return true;
3690 }
3691 }
3692 rcu_read_unlock();
3693#endif
3694 return false;
3695}
3696
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003697/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003698 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3699 * queue (may be a remote CPU queue).
3700 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003701static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3702 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003703{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003704 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003705 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003706 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003707
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003708 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003709
3710 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003711
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003712 rps_lock(sd);
Julian Anastasove9e4dd32015-07-09 09:59:09 +03003713 if (!netif_running(skb->dev))
3714 goto drop;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003715 qlen = skb_queue_len(&sd->input_pkt_queue);
3716 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Li RongQinge008f3f2014-12-08 09:42:55 +08003717 if (qlen) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003718enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003719 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003720 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003721 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003722 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003723 return NET_RX_SUCCESS;
3724 }
3725
Eric Dumazetebda37c22010-05-06 23:51:21 +00003726 /* Schedule NAPI for backlog device
3727	 * We can use a non-atomic operation since we own the queue lock.
3728 */
3729 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003730 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003731 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003732 }
3733 goto enqueue;
3734 }
3735
Julian Anastasove9e4dd32015-07-09 09:59:09 +03003736drop:
Changli Gaodee42872010-05-02 05:42:16 +00003737 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003738 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003739
Tom Herbert0a9627f2010-03-16 08:03:29 +00003740 local_irq_restore(flags);
3741
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003742 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003743 kfree_skb(skb);
3744 return NET_RX_DROP;
3745}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003746
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003747static int netif_rx_internal(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003749 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750
Eric Dumazet588f0332011-11-15 04:12:55 +00003751 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003752
Koki Sanagicf66ba52010-08-23 18:45:02 +09003753 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003754#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003755 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003756 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003757 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758
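		/* RPS: pick a target CPU from the flow tables and enqueue on
		 * that CPU's backlog, falling back to the local CPU when no
		 * mapping exists (cpu < 0).
		 */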
Changli Gaocece1942010-08-07 20:35:43 -07003759 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003760 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003761
3762 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003763 if (cpu < 0)
3764 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003765
3766 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3767
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003768 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003769 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003770 } else
3771#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003772 {
3773 unsigned int qtail;
3774 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3775 put_cpu();
3776 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003777 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003779
3780/**
3781 * netif_rx - post buffer to the network code
3782 * @skb: buffer to post
3783 *
3784 * This function receives a packet from a device driver and queues it for
3785 * the upper (protocol) levels to process. It always succeeds. The buffer
3786 * may be dropped during processing for congestion control or by the
3787 * protocol layers.
3788 *
3789 * return values:
3790 * NET_RX_SUCCESS (no congestion)
3791 * NET_RX_DROP (packet was dropped)
3792 *
3793 */
3794
3795int netif_rx(struct sk_buff *skb)
3796{
3797 trace_netif_rx_entry(skb);
3798
3799 return netif_rx_internal(skb);
3800}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003801EXPORT_SYMBOL(netif_rx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802
3803int netif_rx_ni(struct sk_buff *skb)
3804{
3805 int err;
3806
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003807 trace_netif_rx_ni_entry(skb);
3808
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809 preempt_disable();
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003810 err = netif_rx_internal(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811 if (local_softirq_pending())
3812 do_softirq();
3813 preempt_enable();
3814
3815 return err;
3816}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817EXPORT_SYMBOL(netif_rx_ni);
3818
Emese Revfy0766f782016-06-20 20:42:34 +02003819static __latent_entropy void net_tx_action(struct softirq_action *h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820{
Christoph Lameter903ceff2014-08-17 12:30:35 -05003821 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822
3823 if (sd->completion_queue) {
3824 struct sk_buff *clist;
3825
3826 local_irq_disable();
3827 clist = sd->completion_queue;
3828 sd->completion_queue = NULL;
3829 local_irq_enable();
3830
3831 while (clist) {
3832 struct sk_buff *skb = clist;
3833 clist = clist->next;
3834
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003835 WARN_ON(atomic_read(&skb->users));
Eric Dumazete6247022013-12-05 04:45:08 -08003836 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3837 trace_consume_skb(skb);
3838 else
3839 trace_kfree_skb(skb, net_tx_action);
Jesper Dangaard Brouer15fad712016-02-08 13:15:04 +01003840
3841 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
3842 __kfree_skb(skb);
3843 else
3844 __kfree_skb_defer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845 }
Jesper Dangaard Brouer15fad712016-02-08 13:15:04 +01003846
3847 __kfree_skb_flush();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848 }
3849
3850 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003851 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003852
3853 local_irq_disable();
3854 head = sd->output_queue;
3855 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003856 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003857 local_irq_enable();
3858
3859 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003860 struct Qdisc *q = head;
3861 spinlock_t *root_lock;
3862
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863 head = head->next_sched;
3864
David S. Miller5fb66222008-08-02 20:02:43 -07003865 root_lock = qdisc_lock(q);
Eric Dumazet3bcb8462016-06-04 20:02:28 -07003866 spin_lock(root_lock);
3867 /* We need to make sure head->next_sched is read
3868 * before clearing __QDISC_STATE_SCHED
3869 */
3870 smp_mb__before_atomic();
3871 clear_bit(__QDISC_STATE_SCHED, &q->state);
3872 qdisc_run(q);
3873 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874 }
3875 }
3876}
3877
Javier Martinez Canillas181402a2016-09-09 08:43:15 -04003878#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
Michał Mirosławda678292009-06-05 05:35:28 +00003879/* This hook is defined here for ATM LANE */
3880int (*br_fdb_test_addr_hook)(struct net_device *dev,
3881 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003882EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003883#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003884
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003885static inline struct sk_buff *
3886sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
3887 struct net_device *orig_dev)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003888{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02003889#ifdef CONFIG_NET_CLS_ACT
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003890 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3891 struct tcf_result cl_res;
Eric Dumazet24824a02010-10-02 06:11:55 +00003892
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003893 /* If there's at least one ingress present somewhere (so
3894 * we get here via enabled static key), remaining devices
3895 * that are not configured with an ingress qdisc will bail
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003896 * out here.
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003897 */
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003898 if (!cl)
Daniel Borkmann45771392015-04-10 23:07:54 +02003899 return skb;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003900 if (*pt_prev) {
3901 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3902 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003903 }
3904
Florian Westphal33654952015-05-14 00:36:28 +02003905 qdisc_skb_cb(skb)->pkt_len = skb->len;
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05003906 skb->tc_at_ingress = 1;
Eric Dumazet24ea5912015-07-06 05:18:03 -07003907 qdisc_bstats_cpu_update(cl->q, skb);
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003908
Daniel Borkmann3b3ae882015-08-26 23:00:06 +02003909 switch (tc_classify(skb, cl, &cl_res, false)) {
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003910 case TC_ACT_OK:
3911 case TC_ACT_RECLASSIFY:
3912 skb->tc_index = TC_H_MIN(cl_res.classid);
3913 break;
3914 case TC_ACT_SHOT:
Eric Dumazet24ea5912015-07-06 05:18:03 -07003915 qdisc_qstats_cpu_drop(cl->q);
Eric Dumazet8a3a4c62016-05-06 15:55:50 -07003916 kfree_skb(skb);
3917 return NULL;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003918 case TC_ACT_STOLEN:
3919 case TC_ACT_QUEUED:
Eric Dumazet8a3a4c62016-05-06 15:55:50 -07003920 consume_skb(skb);
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003921 return NULL;
Alexei Starovoitov27b29f62015-09-15 23:05:43 -07003922 case TC_ACT_REDIRECT:
3923 /* skb_mac_header check was done by cls/act_bpf, so
3924 * we can safely push the L2 header back before
3925 * redirecting to another netdev
3926 */
3927 __skb_push(skb, skb->mac_len);
3928 skb_do_redirect(skb);
3929 return NULL;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003930 default:
3931 break;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003932 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02003933#endif /* CONFIG_NET_CLS_ACT */
Herbert Xuf697c3e2007-10-14 00:38:47 -07003934 return skb;
3935}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003937/**
Mahesh Bandewar24b27fc2016-09-01 22:18:34 -07003938 * netdev_is_rx_handler_busy - check if receive handler is registered
3939 * @dev: device to check
3940 *
3941 * Check if a receive handler is already registered for a given device.
3942 * Check if a receive handler is already registered for a given device.
3943 * Return true if there is one.
3944 * The caller must hold the rtnl_mutex.
3945 */
3946bool netdev_is_rx_handler_busy(struct net_device *dev)
3947{
3948 ASSERT_RTNL();
3949 return dev && rtnl_dereference(dev->rx_handler);
3950}
3951EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
3952
3953/**
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003954 * netdev_rx_handler_register - register receive handler
3955 * @dev: device to register a handler for
3956 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003957 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003958 *
Masanari Iidae2278672014-02-18 22:54:36 +09003959 * Register a receive handler for a device. This handler will then be
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003960 * called from __netif_receive_skb. A negative errno code is returned
3961 * on a failure.
3962 *
3963 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003964 *
3965 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003966 */
3967int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003968 rx_handler_func_t *rx_handler,
3969 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003970{
Mahesh Bandewar1b7cd002017-01-18 15:02:49 -08003971 if (netdev_is_rx_handler_busy(dev))
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003972 return -EBUSY;
3973
Eric Dumazet00cfec32013-03-29 03:01:22 +00003974 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00003975 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003976 rcu_assign_pointer(dev->rx_handler, rx_handler);
3977
3978 return 0;
3979}
3980EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3981
3982/**
3983 * netdev_rx_handler_unregister - unregister receive handler
3984 * @dev: device to unregister a handler from
3985 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00003986 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003987 *
3988 * The caller must hold the rtnl_mutex.
3989 */
3990void netdev_rx_handler_unregister(struct net_device *dev)
3991{
3992
3993 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003994 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00003995 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
3996 * section has a guarantee to see a non NULL rx_handler_data
3997 * as well.
3998 */
3999 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00004000 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004001}
4002EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
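
/*
 * Illustrative sketch, not part of dev.c: how an upper device (in the spirit
 * of bonding/macvlan) might claim a lower device's rx path with the helpers
 * above.  The exbond_* names and struct are hypothetical; both attach and
 * detach run under rtnl_lock().
 */
struct exbond_port {
	struct net_device *upper_dev;
};

static rx_handler_result_t exbond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct exbond_port *port = rcu_dereference(skb->dev->rx_handler_data);

	skb->dev = port->upper_dev;	/* steer the frame to the upper device */
	return RX_HANDLER_ANOTHER;	/* re-run __netif_receive_skb_core */
}

static int exbond_attach(struct net_device *lower, struct exbond_port *port)
{
	ASSERT_RTNL();
	return netdev_rx_handler_register(lower, exbond_handle_frame, port);
}

static void exbond_detach(struct net_device *lower)
{
	ASSERT_RTNL();
	netdev_rx_handler_unregister(lower);
}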
4003
Mel Gormanb4b9e352012-07-31 16:44:26 -07004004/*
4005 * Limit the use of PFMEMALLOC reserves to those protocols that implement
4006 * the special handling of PFMEMALLOC skbs.
4007 */
4008static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4009{
4010 switch (skb->protocol) {
Joe Perches2b8837a2014-03-12 10:04:17 -07004011 case htons(ETH_P_ARP):
4012 case htons(ETH_P_IP):
4013 case htons(ETH_P_IPV6):
4014 case htons(ETH_P_8021Q):
4015 case htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07004016 return true;
4017 default:
4018 return false;
4019 }
4020}
4021
Pablo Neirae687ad62015-05-13 18:19:38 +02004022static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4023 int *ret, struct net_device *orig_dev)
4024{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004025#ifdef CONFIG_NETFILTER_INGRESS
Pablo Neirae687ad62015-05-13 18:19:38 +02004026 if (nf_hook_ingress_active(skb)) {
Aaron Conole2c1e2702016-09-21 11:35:03 -04004027 int ingress_retval;
4028
Pablo Neirae687ad62015-05-13 18:19:38 +02004029 if (*pt_prev) {
4030 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4031 *pt_prev = NULL;
4032 }
4033
Aaron Conole2c1e2702016-09-21 11:35:03 -04004034 rcu_read_lock();
4035 ingress_retval = nf_hook_ingress(skb);
4036 rcu_read_unlock();
4037 return ingress_retval;
Pablo Neirae687ad62015-05-13 18:19:38 +02004038 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004039#endif /* CONFIG_NETFILTER_INGRESS */
Pablo Neirae687ad62015-05-13 18:19:38 +02004040 return 0;
4041}
Pablo Neirae687ad62015-05-13 18:19:38 +02004042
David S. Miller9754e292013-02-14 15:57:38 -05004043static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044{
4045 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004046 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07004047 struct net_device *orig_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004048 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049 int ret = NET_RX_DROP;
Al Viro252e33462006-11-14 20:48:11 -08004050 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051
Eric Dumazet588f0332011-11-15 04:12:55 +00004052 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07004053
Koki Sanagicf66ba52010-08-23 18:45:02 +09004054 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08004055
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07004056 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00004057
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07004058 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00004059 if (!skb_transport_header_was_set(skb))
4060 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00004061 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004062
4063 pt_prev = NULL;
4064
David S. Miller63d8ea72011-02-28 10:48:59 -08004065another_round:
David S. Millerb6858172012-07-23 16:27:54 -07004066 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08004067
4068 __this_cpu_inc(softnet_data.processed);
4069
Patrick McHardy8ad227f2013-04-19 02:04:31 +00004070 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4071 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04004072 skb = skb_vlan_untag(skb);
Jiri Pirkobcc6d472011-04-07 19:48:33 +00004073 if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03004074 goto out;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00004075 }
4076
Willem de Bruijne7246e12017-01-07 17:06:35 -05004077 if (skb_skip_tc_classify(skb))
4078 goto skip_classify;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079
David S. Miller9754e292013-02-14 15:57:38 -05004080 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07004081 goto skip_taps;
4082
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Salam Noureddine7866a622015-01-27 11:35:48 -08004084 if (pt_prev)
4085 ret = deliver_skb(skb, pt_prev, orig_dev);
4086 pt_prev = ptype;
4087 }
4088
4089 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4090 if (pt_prev)
4091 ret = deliver_skb(skb, pt_prev, orig_dev);
4092 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093 }
4094
Mel Gormanb4b9e352012-07-31 16:44:26 -07004095skip_taps:
Pablo Neira1cf519002015-05-13 18:19:37 +02004096#ifdef CONFIG_NET_INGRESS
Daniel Borkmann45771392015-04-10 23:07:54 +02004097 if (static_key_false(&ingress_needed)) {
Daniel Borkmann1f211a12016-01-07 22:29:47 +01004098 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
Daniel Borkmann45771392015-04-10 23:07:54 +02004099 if (!skb)
Julian Anastasov2c17d272015-07-09 09:59:10 +03004100 goto out;
Pablo Neirae687ad62015-05-13 18:19:38 +02004101
4102 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
Julian Anastasov2c17d272015-07-09 09:59:10 +03004103 goto out;
Daniel Borkmann45771392015-04-10 23:07:54 +02004104 }
Pablo Neira1cf519002015-05-13 18:19:37 +02004105#endif
Willem de Bruijna5135bc2017-01-07 17:06:36 -05004106 skb_reset_tc(skb);
Willem de Bruijne7246e12017-01-07 17:06:35 -05004107skip_classify:
David S. Miller9754e292013-02-14 15:57:38 -05004108 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07004109 goto drop;
4110
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004111 if (skb_vlan_tag_present(skb)) {
John Fastabend24257172011-10-10 09:16:41 +00004112 if (pt_prev) {
4113 ret = deliver_skb(skb, pt_prev, orig_dev);
4114 pt_prev = NULL;
4115 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004116 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00004117 goto another_round;
4118 else if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03004119 goto out;
John Fastabend24257172011-10-10 09:16:41 +00004120 }
4121
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004122 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004123 if (rx_handler) {
4124 if (pt_prev) {
4125 ret = deliver_skb(skb, pt_prev, orig_dev);
4126 pt_prev = NULL;
4127 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004128 switch (rx_handler(&skb)) {
4129 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00004130 ret = NET_RX_SUCCESS;
Julian Anastasov2c17d272015-07-09 09:59:10 +03004131 goto out;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004132 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08004133 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004134 case RX_HANDLER_EXACT:
4135 deliver_exact = true;
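			/* fall through */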
4136 case RX_HANDLER_PASS:
4137 break;
4138 default:
4139 BUG();
4140 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004141 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004143 if (unlikely(skb_vlan_tag_present(skb))) {
4144 if (skb_vlan_tag_get_id(skb))
Eric Dumazetd4b812d2013-07-18 07:19:26 -07004145 skb->pkt_type = PACKET_OTHERHOST;
4146 /* Note: we might in the future use prio bits
4147 * and set skb->priority like in vlan_do_receive().
4148 * For the time being, just ignore the Priority Code Point.
4149 */
4150 skb->vlan_tci = 0;
4151 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004152
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153 type = skb->protocol;
Salam Noureddine7866a622015-01-27 11:35:48 -08004154
4155 /* deliver only exact match when indicated */
4156 if (likely(!deliver_exact)) {
4157 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4158 &ptype_base[ntohs(type) &
4159 PTYPE_HASH_MASK]);
4160 }
4161
4162 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4163 &orig_dev->ptype_specific);
4164
4165 if (unlikely(skb->dev != orig_dev)) {
4166 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4167 &skb->dev->ptype_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168 }
4169
4170 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00004171 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00004172 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00004173 else
4174 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004175 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07004176drop:
Jarod Wilson6e7333d2016-02-01 18:51:05 -05004177 if (!deliver_exact)
4178 atomic_long_inc(&skb->dev->rx_dropped);
4179 else
4180 atomic_long_inc(&skb->dev->rx_nohandler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181 kfree_skb(skb);
4182 /* Jamal, now you will not be able to escape explaining to
4183 * me how you were going to use this. :-)
4184 */
4185 ret = NET_RX_DROP;
4186 }
4187
Julian Anastasov2c17d272015-07-09 09:59:10 +03004188out:
David S. Miller9754e292013-02-14 15:57:38 -05004189 return ret;
4190}
4191
4192static int __netif_receive_skb(struct sk_buff *skb)
4193{
4194 int ret;
4195
4196 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
4197 unsigned long pflags = current->flags;
4198
4199 /*
4200 * PFMEMALLOC skbs are special, they should
4201 * - be delivered to SOCK_MEMALLOC sockets only
4202 * - stay away from userspace
4203 * - have bounded memory usage
4204 *
4205 * Use PF_MEMALLOC as this saves us from propagating the allocation
4206 * context down to all allocation sites.
4207 */
4208 current->flags |= PF_MEMALLOC;
4209 ret = __netif_receive_skb_core(skb, true);
4210 tsk_restore_flags(current, pflags, PF_MEMALLOC);
4211 } else
4212 ret = __netif_receive_skb_core(skb, false);
4213
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214 return ret;
4215}
Tom Herbert0a9627f2010-03-16 08:03:29 +00004216
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004217static int netif_receive_skb_internal(struct sk_buff *skb)
Tom Herbert0a9627f2010-03-16 08:03:29 +00004218{
Julian Anastasov2c17d272015-07-09 09:59:10 +03004219 int ret;
4220
Eric Dumazet588f0332011-11-15 04:12:55 +00004221 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07004222
Richard Cochranc1f19b52010-07-17 08:49:36 +00004223 if (skb_defer_rx_timestamp(skb))
4224 return NET_RX_SUCCESS;
4225
Julian Anastasov2c17d272015-07-09 09:59:10 +03004226 rcu_read_lock();
4227
Eric Dumazetdf334542010-03-24 19:13:54 +00004228#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01004229 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07004230 struct rps_dev_flow voidflow, *rflow = &voidflow;
Julian Anastasov2c17d272015-07-09 09:59:10 +03004231 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07004232
Eric Dumazet3b098e22010-05-15 23:57:10 -07004233 if (cpu >= 0) {
4234 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4235 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00004236 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07004237 }
Tom Herbertfec5e652010-04-16 16:01:27 -07004238 }
Tom Herbert1e94d722010-03-18 17:45:44 -07004239#endif
Julian Anastasov2c17d272015-07-09 09:59:10 +03004240 ret = __netif_receive_skb(skb);
4241 rcu_read_unlock();
4242 return ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004243}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004244
4245/**
4246 * netif_receive_skb - process receive buffer from network
4247 * @skb: buffer to process
4248 *
4249 * netif_receive_skb() is the main receive data processing function.
4250 * It always succeeds. The buffer may be dropped during processing
4251 * for congestion control or by the protocol layers.
4252 *
4253 * This function may only be called from softirq context and interrupts
4254 * should be enabled.
4255 *
4256 * Return values (usually ignored):
4257 * NET_RX_SUCCESS: no congestion
4258 * NET_RX_DROP: packet was dropped
4259 */
Eric W. Biederman04eb4482015-09-15 20:04:15 -05004260int netif_receive_skb(struct sk_buff *skb)
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004261{
4262 trace_netif_receive_skb_entry(skb);
4263
4264 return netif_receive_skb_internal(skb);
4265}
Eric W. Biederman04eb4482015-09-15 20:04:15 -05004266EXPORT_SYMBOL(netif_receive_skb);
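
/*
 * Illustrative sketch, not part of dev.c: the direct (non-GRO) delivery path
 * from a NAPI poll loop.  exnr_build_skb stands in for the driver's
 * descriptor-to-skb conversion and is hypothetical.
 */
static struct sk_buff *exnr_build_skb(struct net_device *dev); /* driver specific */

static int exnr_rx_one(struct net_device *dev)
{
	struct sk_buff *skb = exnr_build_skb(dev);

	if (!skb)
		return NET_RX_DROP;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_receive_skb(skb);	/* softirq context, irqs enabled */
}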
Linus Torvalds1da177e2005-04-16 15:20:36 -07004267
Eric Dumazet41852492016-08-26 12:50:39 -07004268DEFINE_PER_CPU(struct work_struct, flush_works);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004269
4270/* Network device is going away, flush any packets still pending */
4271static void flush_backlog(struct work_struct *work)
4272{
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004273 struct sk_buff *skb, *tmp;
4274 struct softnet_data *sd;
4275
4276 local_bh_disable();
4277 sd = this_cpu_ptr(&softnet_data);
4278
4279 local_irq_disable();
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004280 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004281 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Eric Dumazet41852492016-08-26 12:50:39 -07004282 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004283 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004284 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004285 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004286 }
Changli Gao6e7676c2010-04-27 15:07:33 -07004287 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004288 rps_unlock(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004289 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004290
4291 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
Eric Dumazet41852492016-08-26 12:50:39 -07004292 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
Changli Gao6e7676c2010-04-27 15:07:33 -07004293 __skb_unlink(skb, &sd->process_queue);
4294 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004295 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004296 }
4297 }
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004298 local_bh_enable();
4299}
4300
Eric Dumazet41852492016-08-26 12:50:39 -07004301static void flush_all_backlogs(void)
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004302{
4303 unsigned int cpu;
4304
4305 get_online_cpus();
4306
Eric Dumazet41852492016-08-26 12:50:39 -07004307 for_each_online_cpu(cpu)
4308 queue_work_on(cpu, system_highpri_wq,
4309 per_cpu_ptr(&flush_works, cpu));
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004310
4311 for_each_online_cpu(cpu)
Eric Dumazet41852492016-08-26 12:50:39 -07004312 flush_work(per_cpu_ptr(&flush_works, cpu));
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004313
4314 put_online_cpus();
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004315}
4316
Herbert Xud565b0a2008-12-15 23:38:52 -08004317static int napi_gro_complete(struct sk_buff *skb)
4318{
Vlad Yasevich22061d82012-11-15 08:49:11 +00004319 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004320 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004321 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08004322 int err = -ENOENT;
4323
Eric Dumazetc3c7c252012-12-06 13:54:59 +00004324 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4325
Herbert Xufc59f9a2009-04-14 15:11:06 -07004326 if (NAPI_GRO_CB(skb)->count == 1) {
4327 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004328 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07004329 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004330
4331 rcu_read_lock();
4332 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004333 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08004334 continue;
4335
Jerry Chu299603e82013-12-11 20:53:45 -08004336 err = ptype->callbacks.gro_complete(skb, 0);
Herbert Xud565b0a2008-12-15 23:38:52 -08004337 break;
4338 }
4339 rcu_read_unlock();
4340
4341 if (err) {
4342 WARN_ON(&ptype->list == head);
4343 kfree_skb(skb);
4344 return NET_RX_SUCCESS;
4345 }
4346
4347out:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004348 return netif_receive_skb_internal(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004349}
4350
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004351/* napi->gro_list contains packets ordered by age;
4352 * the youngest packets are at the head of the list.
4353 * Complete skbs in reverse order to reduce latencies.
4354 */
4355void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08004356{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004357 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004358
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004359 /* scan list and build reverse chain */
4360 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4361 skb->prev = prev;
4362 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08004363 }
4364
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004365 for (skb = prev; skb; skb = prev) {
4366 skb->next = NULL;
4367
4368 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4369 return;
4370
4371 prev = skb->prev;
4372 napi_gro_complete(skb);
4373 napi->gro_count--;
4374 }
4375
Herbert Xud565b0a2008-12-15 23:38:52 -08004376 napi->gro_list = NULL;
4377}
Eric Dumazet86cac582010-08-31 18:25:32 +00004378EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08004379
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004380static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4381{
4382 struct sk_buff *p;
4383 unsigned int maclen = skb->dev->hard_header_len;
Tom Herbert0b4cec82014-01-15 08:58:06 -08004384 u32 hash = skb_get_hash_raw(skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004385
4386 for (p = napi->gro_list; p; p = p->next) {
4387 unsigned long diffs;
4388
Tom Herbert0b4cec82014-01-15 08:58:06 -08004389 NAPI_GRO_CB(p)->flush = 0;
4390
4391 if (hash != skb_get_hash_raw(p)) {
4392 NAPI_GRO_CB(p)->same_flow = 0;
4393 continue;
4394 }
4395
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004396 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4397 diffs |= p->vlan_tci ^ skb->vlan_tci;
Jesse Grossce87fc62016-01-20 17:59:49 -08004398 diffs |= skb_metadata_dst_cmp(p, skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004399 if (maclen == ETH_HLEN)
4400 diffs |= compare_ether_header(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07004401 skb_mac_header(skb));
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004402 else if (!diffs)
4403 diffs = memcmp(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07004404 skb_mac_header(skb),
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004405 maclen);
4406 NAPI_GRO_CB(p)->same_flow = !diffs;
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004407 }
4408}
4409
Jerry Chu299603e82013-12-11 20:53:45 -08004410static void skb_gro_reset_offset(struct sk_buff *skb)
4411{
4412 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4413 const skb_frag_t *frag0 = &pinfo->frags[0];
4414
4415 NAPI_GRO_CB(skb)->data_offset = 0;
4416 NAPI_GRO_CB(skb)->frag0 = NULL;
4417 NAPI_GRO_CB(skb)->frag0_len = 0;
4418
4419 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4420 pinfo->nr_frags &&
4421 !PageHighMem(skb_frag_page(frag0))) {
4422 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
Eric Dumazet7cfd5fd2017-01-10 19:52:43 -08004423 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
4424 skb_frag_size(frag0),
4425 skb->end - skb->tail);
Herbert Xud565b0a2008-12-15 23:38:52 -08004426 }
4427}
4428
Eric Dumazeta50e2332014-03-29 21:28:21 -07004429static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4430{
4431 struct skb_shared_info *pinfo = skb_shinfo(skb);
4432
4433 BUG_ON(skb->end - skb->tail < grow);
4434
4435 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4436
4437 skb->data_len -= grow;
4438 skb->tail += grow;
4439
4440 pinfo->frags[0].page_offset += grow;
4441 skb_frag_size_sub(&pinfo->frags[0], grow);
4442
4443 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4444 skb_frag_unref(skb, 0);
4445 memmove(pinfo->frags, pinfo->frags + 1,
4446 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4447 }
4448}
4449
Rami Rosenbb728822012-11-28 21:55:25 +00004450static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08004451{
4452 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004453 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004454 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004455 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004456 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004457 enum gro_result ret;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004458 int grow;
Herbert Xud565b0a2008-12-15 23:38:52 -08004459
Eric W. Biederman9c62a682014-03-14 20:51:52 -07004460 if (!(skb->dev->features & NETIF_F_GRO))
Herbert Xud565b0a2008-12-15 23:38:52 -08004461 goto normal;
4462
Eric Dumazetd61d0722016-11-07 11:12:27 -08004463 if (skb->csum_bad)
Herbert Xuf17f5c92009-01-14 14:36:12 -08004464 goto normal;
4465
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004466 gro_list_prepare(napi, skb);
4467
Herbert Xud565b0a2008-12-15 23:38:52 -08004468 rcu_read_lock();
4469 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004470 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08004471 continue;
4472
Herbert Xu86911732009-01-29 14:19:50 +00004473 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00004474 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004475 NAPI_GRO_CB(skb)->same_flow = 0;
Eric Dumazetd61d0722016-11-07 11:12:27 -08004476 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
Herbert Xu5d38a072009-01-04 16:13:40 -08004477 NAPI_GRO_CB(skb)->free = 0;
Jesse Grossfac8e0f2016-03-19 09:32:01 -07004478 NAPI_GRO_CB(skb)->encap_mark = 0;
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02004479 NAPI_GRO_CB(skb)->recursion_counter = 0;
Alexander Duycka0ca1532016-04-05 09:13:39 -07004480 NAPI_GRO_CB(skb)->is_fou = 0;
Alexander Duyck15305452016-04-10 21:44:57 -04004481 NAPI_GRO_CB(skb)->is_atomic = 1;
Tom Herbert15e23962015-02-10 16:30:31 -08004482 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004483
Tom Herbert662880f2014-08-27 21:26:56 -07004484 /* Setup for GRO checksum validation */
4485 switch (skb->ip_summed) {
4486 case CHECKSUM_COMPLETE:
4487 NAPI_GRO_CB(skb)->csum = skb->csum;
4488 NAPI_GRO_CB(skb)->csum_valid = 1;
4489 NAPI_GRO_CB(skb)->csum_cnt = 0;
4490 break;
4491 case CHECKSUM_UNNECESSARY:
4492 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4493 NAPI_GRO_CB(skb)->csum_valid = 0;
4494 break;
4495 default:
4496 NAPI_GRO_CB(skb)->csum_cnt = 0;
4497 NAPI_GRO_CB(skb)->csum_valid = 0;
4498 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004499
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004500 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004501 break;
4502 }
4503 rcu_read_unlock();
4504
4505 if (&ptype->list == head)
4506 goto normal;
4507
Steffen Klassert25393d32017-02-15 09:39:44 +01004508 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
4509 ret = GRO_CONSUMED;
4510 goto ok;
4511 }
4512
Herbert Xu0da2afd52008-12-26 14:57:42 -08004513 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004514 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004515
Herbert Xud565b0a2008-12-15 23:38:52 -08004516 if (pp) {
4517 struct sk_buff *nskb = *pp;
4518
4519 *pp = nskb->next;
4520 nskb->next = NULL;
4521 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00004522 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08004523 }
4524
Herbert Xu0da2afd52008-12-26 14:57:42 -08004525 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08004526 goto ok;
4527
Eric Dumazet600adc12014-01-09 14:12:19 -08004528 if (NAPI_GRO_CB(skb)->flush)
Herbert Xud565b0a2008-12-15 23:38:52 -08004529 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08004530
Eric Dumazet600adc12014-01-09 14:12:19 -08004531 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4532 struct sk_buff *nskb = napi->gro_list;
4533
4534 /* locate the end of the list to select the 'oldest' flow */
4535 while (nskb->next) {
4536 pp = &nskb->next;
4537 nskb = *pp;
4538 }
4539 *pp = NULL;
4540 nskb->next = NULL;
4541 napi_gro_complete(nskb);
4542 } else {
4543 napi->gro_count++;
4544 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004545 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004546 NAPI_GRO_CB(skb)->age = jiffies;
Eric Dumazet29e98242014-05-16 11:34:37 -07004547 NAPI_GRO_CB(skb)->last = skb;
Herbert Xu86911732009-01-29 14:19:50 +00004548 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004549 skb->next = napi->gro_list;
4550 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004551 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08004552
Herbert Xuad0f9902009-02-01 01:24:55 -08004553pull:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004554 grow = skb_gro_offset(skb) - skb_headlen(skb);
4555 if (grow > 0)
4556 gro_pull_from_frag0(skb, grow);
Herbert Xud565b0a2008-12-15 23:38:52 -08004557ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004558 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08004559
4560normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08004561 ret = GRO_NORMAL;
4562 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08004563}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004564
Jerry Chubf5a7552014-01-07 10:23:19 -08004565struct packet_offload *gro_find_receive_by_type(__be16 type)
4566{
4567 struct list_head *offload_head = &offload_base;
4568 struct packet_offload *ptype;
4569
4570 list_for_each_entry_rcu(ptype, offload_head, list) {
4571 if (ptype->type != type || !ptype->callbacks.gro_receive)
4572 continue;
4573 return ptype;
4574 }
4575 return NULL;
4576}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004577EXPORT_SYMBOL(gro_find_receive_by_type);
Jerry Chubf5a7552014-01-07 10:23:19 -08004578
4579struct packet_offload *gro_find_complete_by_type(__be16 type)
4580{
4581 struct list_head *offload_head = &offload_base;
4582 struct packet_offload *ptype;
4583
4584 list_for_each_entry_rcu(ptype, offload_head, list) {
4585 if (ptype->type != type || !ptype->callbacks.gro_complete)
4586 continue;
4587 return ptype;
4588 }
4589 return NULL;
4590}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004591EXPORT_SYMBOL(gro_find_complete_by_type);
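
/*
 * Illustrative sketch, not part of dev.c: how an encapsulation offload might
 * chain into the inner protocol's GRO callbacks via the lookup helpers above,
 * in the spirit of the UDP tunnel offloads.  extun_gro_receive is
 * hypothetical, and an IPv4 inner payload is assumed.
 */
static struct sk_buff **extun_gro_receive(struct sk_buff **head,
					  struct sk_buff *skb)
{
	struct packet_offload *ptype;
	struct sk_buff **pp = NULL;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(htons(ETH_P_IP));
	if (ptype)
		pp = ptype->callbacks.gro_receive(head, skb);
	rcu_read_unlock();
	return pp;
}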
Herbert Xu96e93ea2009-01-06 10:49:34 -08004592
Rami Rosenbb728822012-11-28 21:55:25 +00004593static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08004594{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004595 switch (ret) {
4596 case GRO_NORMAL:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004597 if (netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004598 ret = GRO_DROP;
4599 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004600
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004601 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08004602 kfree_skb(skb);
4603 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004604
Eric Dumazetdaa86542012-04-19 07:07:40 +00004605 case GRO_MERGED_FREE:
Jesse Grossce87fc62016-01-20 17:59:49 -08004606 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
4607 skb_dst_drop(skb);
Steffen Klassertf991bb92017-01-30 06:45:38 +01004608 secpath_reset(skb);
Eric Dumazetd7e88832012-04-30 08:10:34 +00004609 kmem_cache_free(skbuff_head_cache, skb);
Jesse Grossce87fc62016-01-20 17:59:49 -08004610 } else {
Eric Dumazetd7e88832012-04-30 08:10:34 +00004611 __kfree_skb(skb);
Jesse Grossce87fc62016-01-20 17:59:49 -08004612 }
Eric Dumazetdaa86542012-04-19 07:07:40 +00004613 break;
4614
Ben Hutchings5b252f02009-10-29 07:17:09 +00004615 case GRO_HELD:
4616 case GRO_MERGED:
Steffen Klassert25393d32017-02-15 09:39:44 +01004617 case GRO_CONSUMED:
Ben Hutchings5b252f02009-10-29 07:17:09 +00004618 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004619 }
4620
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004621 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004622}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004623
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004624gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004625{
Eric Dumazet93f93a42015-11-18 06:30:59 -08004626 skb_mark_napi_id(skb, napi);
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004627 trace_napi_gro_receive_entry(skb);
Herbert Xu86911732009-01-29 14:19:50 +00004628
Eric Dumazeta50e2332014-03-29 21:28:21 -07004629 skb_gro_reset_offset(skb);
4630
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004631 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004632}
4633EXPORT_SYMBOL(napi_gro_receive);
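
/*
 * Illustrative sketch, not part of dev.c: the common driver pattern of
 * feeding completed rx descriptors into GRO from poll context.  The
 * exgro_ring layout and exgro_fetch_skb are hypothetical.
 */
struct exgro_ring {
	struct net_device *netdev;
	struct napi_struct napi;
};

static struct sk_buff *exgro_fetch_skb(struct exgro_ring *ring); /* driver specific */

static int exgro_clean_rx(struct exgro_ring *ring, int budget)
{
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = exgro_fetch_skb(ring);

		if (!skb)
			break;		/* ring drained */
		skb->protocol = eth_type_trans(skb, ring->netdev);
		napi_gro_receive(&ring->napi, skb);
		done++;
	}
	return done;
}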
4634
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00004635static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004636{
Eric Dumazet93a35f52014-10-23 06:30:30 -07004637 if (unlikely(skb->pfmemalloc)) {
4638 consume_skb(skb);
4639 return;
4640 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004641 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00004642 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4643 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00004644 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08004645 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08004646 skb->skb_iif = 0;
Jerry Chuc3caf112014-07-14 15:54:46 -07004647 skb->encapsulation = 0;
4648 skb_shinfo(skb)->gso_type = 0;
Eric Dumazete33d0ba2014-04-03 09:28:10 -07004649 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
Steffen Klassertf991bb92017-01-30 06:45:38 +01004650 secpath_reset(skb);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004651
4652 napi->skb = skb;
4653}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004654
Herbert Xu76620aa2009-04-16 02:02:07 -07004655struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08004656{
Herbert Xu5d38a072009-01-04 16:13:40 -08004657 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004658
4659 if (!skb) {
Alexander Duyckfd11a832014-12-09 19:40:49 -08004660 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
Eric Dumazete2f9dc32015-11-19 12:11:23 -08004661 if (skb) {
4662 napi->skb = skb;
4663 skb_mark_napi_id(skb, napi);
4664 }
Herbert Xu5d38a072009-01-04 16:13:40 -08004665 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004666 return skb;
4667}
Herbert Xu76620aa2009-04-16 02:02:07 -07004668EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004669
Eric Dumazeta50e2332014-03-29 21:28:21 -07004670static gro_result_t napi_frags_finish(struct napi_struct *napi,
4671 struct sk_buff *skb,
4672 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004673{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004674 switch (ret) {
4675 case GRO_NORMAL:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004676 case GRO_HELD:
4677 __skb_push(skb, ETH_HLEN);
4678 skb->protocol = eth_type_trans(skb, skb->dev);
4679 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004680 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00004681 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004682
4683 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004684 case GRO_MERGED_FREE:
4685 napi_reuse_skb(napi, skb);
4686 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004687
4688 case GRO_MERGED:
Steffen Klassert25393d32017-02-15 09:39:44 +01004689 case GRO_CONSUMED:
Ben Hutchings5b252f02009-10-29 07:17:09 +00004690 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004691 }
4692
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004693 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004694}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004695
Eric Dumazeta50e2332014-03-29 21:28:21 -07004696/* The upper GRO stack assumes the network header starts at gro_offset=0.
4697 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
4698 * we copy the Ethernet header into skb->data to have a common layout.
4699 */
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004700static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004701{
Herbert Xu76620aa2009-04-16 02:02:07 -07004702 struct sk_buff *skb = napi->skb;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004703 const struct ethhdr *eth;
4704 unsigned int hlen = sizeof(*eth);
Herbert Xu76620aa2009-04-16 02:02:07 -07004705
4706 napi->skb = NULL;
4707
Eric Dumazeta50e2332014-03-29 21:28:21 -07004708 skb_reset_mac_header(skb);
4709 skb_gro_reset_offset(skb);
4710
4711 eth = skb_gro_header_fast(skb, 0);
4712 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4713 eth = skb_gro_header_slow(skb, hlen, 0);
4714 if (unlikely(!eth)) {
Aaron Conole4da46ce2016-04-02 15:26:43 -04004715 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
4716 __func__, napi->dev->name);
Eric Dumazeta50e2332014-03-29 21:28:21 -07004717 napi_reuse_skb(napi, skb);
4718 return NULL;
4719 }
4720 } else {
4721 gro_pull_from_frag0(skb, hlen);
4722 NAPI_GRO_CB(skb)->frag0 += hlen;
4723 NAPI_GRO_CB(skb)->frag0_len -= hlen;
Herbert Xu76620aa2009-04-16 02:02:07 -07004724 }
Eric Dumazeta50e2332014-03-29 21:28:21 -07004725 __skb_pull(skb, hlen);
4726
4727 /*
4728 * This works because the only protocols we care about don't require
4729 * special handling.
4730 * We'll fix it up properly in napi_frags_finish()
4731 */
4732 skb->protocol = eth->h_proto;
Herbert Xu76620aa2009-04-16 02:02:07 -07004733
Herbert Xu76620aa2009-04-16 02:02:07 -07004734 return skb;
4735}
Herbert Xu76620aa2009-04-16 02:02:07 -07004736
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004737gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004738{
4739 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004740
4741 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004742 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004743
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004744 trace_napi_gro_frags_entry(skb);
4745
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004746 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004747}
4748EXPORT_SYMBOL(napi_gro_frags);
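
/*
 * Illustrative sketch, not part of dev.c: the napi_get_frags() /
 * napi_gro_frags() pattern used by drivers that receive straight into pages
 * (in the spirit of mlx4/mlx5).  exfr_rx_frag is hypothetical; the page is
 * assumed to hold the whole frame, Ethernet header included, so that
 * napi_frags_skb() can pull the header from frag 0.
 */
static void exfr_rx_frag(struct napi_struct *napi, struct page *page,
			 unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;			/* allocation failed: frame is dropped */
	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;
	napi_gro_frags(napi);		/* consumes (or recycles) napi->skb */
}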
4749
Tom Herbert573e8fc2014-08-22 13:33:47 -07004750/* Compute the checksum from gro_offset and return the folded value
4751 * after adding in any pseudo checksum.
4752 */
4753__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4754{
4755 __wsum wsum;
4756 __sum16 sum;
4757
4758 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4759
4760 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4761 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4762 if (likely(!sum)) {
4763 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4764 !skb->csum_complete_sw)
4765 netdev_rx_csum_fault(skb->dev);
4766 }
4767
4768 NAPI_GRO_CB(skb)->csum = wsum;
4769 NAPI_GRO_CB(skb)->csum_valid = 1;
4770
4771 return sum;
4772}
4773EXPORT_SYMBOL(__skb_gro_checksum_complete);
4774
Eric Dumazete326bed2010-04-22 00:22:45 -07004775/*
Zhi Yong Wu855abcf2014-01-01 04:34:50 +08004776 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
Eric Dumazete326bed2010-04-22 00:22:45 -07004777 * Note: called with local irq disabled, but exits with local irq enabled.
4778 */
4779static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4780{
4781#ifdef CONFIG_RPS
4782 struct softnet_data *remsd = sd->rps_ipi_list;
4783
4784 if (remsd) {
4785 sd->rps_ipi_list = NULL;
4786
4787 local_irq_enable();
4788
4789 /* Send pending IPIs to kick RPS processing on remote CPUs. */
4790 while (remsd) {
4791 struct softnet_data *next = remsd->rps_ipi_next;
4792
4793 if (cpu_online(remsd->cpu))
Frederic Weisbeckerc46fff22014-02-24 16:40:02 +01004794 smp_call_function_single_async(remsd->cpu,
Frederic Weisbeckerfce8ad12014-02-24 16:40:01 +01004795 &remsd->csd);
Eric Dumazete326bed2010-04-22 00:22:45 -07004796 remsd = next;
4797 }
4798 } else
4799#endif
4800 local_irq_enable();
4801}
4802
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004803static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4804{
4805#ifdef CONFIG_RPS
4806 return sd->rps_ipi_list != NULL;
4807#else
4808 return false;
4809#endif
4810}
4811
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004812static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004813{
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004814 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004815 bool again = true;
4816 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004817
Eric Dumazete326bed2010-04-22 00:22:45 -07004818 /* Check if we have pending IPIs; it's better to send them now,
4819 * rather than waiting for net_rx_action() to end.
4820 */
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004821 if (sd_has_rps_ipi_waiting(sd)) {
Eric Dumazete326bed2010-04-22 00:22:45 -07004822 local_irq_disable();
4823 net_rps_action_and_irq_enable(sd);
4824 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004825
Matthias Tafelmeier3d48b532016-12-29 21:37:21 +01004826 napi->weight = dev_rx_weight;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004827 while (again) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004828 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004829
Changli Gao6e7676c2010-04-27 15:07:33 -07004830 while ((skb = __skb_dequeue(&sd->process_queue))) {
Julian Anastasov2c17d272015-07-09 09:59:10 +03004831 rcu_read_lock();
Changli Gao6e7676c2010-04-27 15:07:33 -07004832 __netif_receive_skb(skb);
Julian Anastasov2c17d272015-07-09 09:59:10 +03004833 rcu_read_unlock();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004834 input_queue_head_incr(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004835 if (++work >= quota)
Tom Herbert76cc8b12010-05-20 18:37:59 +00004836 return work;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004837
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004838 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004839
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004840 local_irq_disable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004841 rps_lock(sd);
Tom Herbert11ef7a82014-06-30 09:50:40 -07004842 if (skb_queue_empty(&sd->input_pkt_queue)) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004843 /*
4844 * Inline a custom version of __napi_complete().
4845 * Only the current CPU owns and manipulates this napi,
Tom Herbert11ef7a82014-06-30 09:50:40 -07004846 * and NAPI_STATE_SCHED is the only possible flag set
4847 * on backlog.
4848 * We can use a plain write instead of clear_bit(),
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004849 * and we don't need an smp_mb() memory barrier.
4850 */
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004851 napi->state = 0;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004852 again = false;
4853 } else {
4854 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4855 &sd->process_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07004856 }
4857 rps_unlock(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004858 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004859 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004860
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004861 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004862}
4863
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004864/**
4865 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004866 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004867 *
Eric Dumazetbc9ad162014-10-28 18:05:13 -07004868 * The entry's receive function will be scheduled to run.
4869 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004870 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004871void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004872{
4873 unsigned long flags;
4874
4875 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05004876 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004877 local_irq_restore(flags);
4878}
4879EXPORT_SYMBOL(__napi_schedule);
4880
Eric Dumazetbc9ad162014-10-28 18:05:13 -07004881/**
4882 * __napi_schedule_irqoff - schedule for receive
4883 * @n: entry to schedule
4884 *
4885 * Variant of __napi_schedule() assuming hard irqs are masked
4886 */
4887void __napi_schedule_irqoff(struct napi_struct *n)
4888{
4889 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4890}
4891EXPORT_SYMBOL(__napi_schedule_irqoff);
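
/*
 * Illustrative sketch, not part of dev.c: the canonical hard-irq handler
 * that masks device interrupts and defers rx work to NAPI.  With hard irqs
 * already off, napi_schedule_irqoff() (built on the helper above) skips the
 * local_irq_save()/restore pair.  exirq_* names are hypothetical; assumes
 * <linux/interrupt.h> for irqreturn_t.
 */
struct exirq_priv {
	struct napi_struct napi;
};

static void exirq_mask(struct exirq_priv *priv); /* driver specific */

static irqreturn_t exirq_isr(int irq, void *data)
{
	struct exirq_priv *priv = data;

	exirq_mask(priv);		/* quiesce the device */
	napi_schedule_irqoff(&priv->napi);
	return IRQ_HANDLED;
}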
4892
Eric Dumazet364b6052016-11-15 10:15:13 -08004893bool napi_complete_done(struct napi_struct *n, int work_done)
Herbert Xud565b0a2008-12-15 23:38:52 -08004894{
4895 unsigned long flags;
4896
4897 /*
Eric Dumazet217f6972016-11-15 10:15:11 -08004898 * 1) Don't let napi dequeue from the cpu poll list
4899 * just in case it's running on a different CPU.
4900 * 2) If we are busy polling, do nothing here, we have
4901 * the guarantee we will be called later.
Herbert Xud565b0a2008-12-15 23:38:52 -08004902 */
Eric Dumazet217f6972016-11-15 10:15:11 -08004903 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
4904 NAPIF_STATE_IN_BUSY_POLL)))
Eric Dumazet364b6052016-11-15 10:15:13 -08004905 return false;
Herbert Xud565b0a2008-12-15 23:38:52 -08004906
Eric Dumazet3b47d302014-11-06 21:09:44 -08004907 if (n->gro_list) {
4908 unsigned long timeout = 0;
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004909
Eric Dumazet3b47d302014-11-06 21:09:44 -08004910 if (work_done)
4911 timeout = n->dev->gro_flush_timeout;
4912
4913 if (timeout)
4914 hrtimer_start(&n->timer, ns_to_ktime(timeout),
4915 HRTIMER_MODE_REL_PINNED);
4916 else
4917 napi_gro_flush(n, false);
4918 }
Eric Dumazet02c16022017-02-04 15:25:02 -08004919 if (unlikely(!list_empty(&n->poll_list))) {
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004920 /* If n->poll_list is not empty, we need to mask irqs */
4921 local_irq_save(flags);
Eric Dumazet02c16022017-02-04 15:25:02 -08004922 list_del_init(&n->poll_list);
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004923 local_irq_restore(flags);
4924 }
Eric Dumazet02c16022017-02-04 15:25:02 -08004925 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
Eric Dumazet364b6052016-11-15 10:15:13 -08004926 return true;
Herbert Xud565b0a2008-12-15 23:38:52 -08004927}
Eric Dumazet3b47d302014-11-06 21:09:44 -08004928EXPORT_SYMBOL(napi_complete_done);
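
/*
 * Illustrative sketch, not part of dev.c: the poll() contract that
 * napi_complete_done() serves.  Returning less than @budget means the ring
 * is drained; the driver may re-arm its interrupt only when
 * napi_complete_done() returns true (i.e. NAPI really went idle and is not
 * being busy-polled).  expoll_* names are hypothetical.
 */
struct expoll_priv {
	struct napi_struct napi;
};

static int expoll_clean_rx(struct expoll_priv *priv, int budget); /* driver specific */
static void expoll_unmask(struct expoll_priv *priv); /* driver specific */

static int expoll_poll(struct napi_struct *napi, int budget)
{
	struct expoll_priv *priv = container_of(napi, struct expoll_priv, napi);
	int work = expoll_clean_rx(priv, budget);

	if (work < budget && napi_complete_done(napi, work))
		expoll_unmask(priv);	/* re-enable device interrupts */
	return work;
}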
Herbert Xud565b0a2008-12-15 23:38:52 -08004929
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004930/* must be called under rcu_read_lock(), as we don't take a reference */
Eric Dumazet02d62e82015-11-18 06:30:52 -08004931static struct napi_struct *napi_by_id(unsigned int napi_id)
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004932{
4933 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4934 struct napi_struct *napi;
4935
4936 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4937 if (napi->napi_id == napi_id)
4938 return napi;
4939
4940 return NULL;
4941}
Eric Dumazet02d62e82015-11-18 06:30:52 -08004942
4943#if defined(CONFIG_NET_RX_BUSY_POLL)
Eric Dumazet217f6972016-11-15 10:15:11 -08004944
Eric Dumazetce6aea92015-11-18 06:30:54 -08004945#define BUSY_POLL_BUDGET 8
Eric Dumazet217f6972016-11-15 10:15:11 -08004946
4947static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
4948{
4949 int rc;
4950
4951 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
4952
4953 local_bh_disable();
4954
4955 /* All we really want here is to re-enable device interrupts.
4956 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
4957 */
4958 rc = napi->poll(napi, BUSY_POLL_BUDGET);
4959 netpoll_poll_unlock(have_poll_lock);
4960 if (rc == BUSY_POLL_BUDGET)
4961 __napi_schedule(napi);
4962 local_bh_enable();
4963 if (local_softirq_pending())
4964 do_softirq();
4965}
4966
Eric Dumazet02d62e82015-11-18 06:30:52 -08004967bool sk_busy_loop(struct sock *sk, int nonblock)
4968{
4969 unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
Eric Dumazet217f6972016-11-15 10:15:11 -08004970 int (*napi_poll)(struct napi_struct *napi, int budget);
Eric Dumazet217f6972016-11-15 10:15:11 -08004971 void *have_poll_lock = NULL;
Eric Dumazet02d62e82015-11-18 06:30:52 -08004972 struct napi_struct *napi;
Eric Dumazet217f6972016-11-15 10:15:11 -08004973 int rc;
4974
4975restart:
4976 rc = false;
4977 napi_poll = NULL;
Eric Dumazet02d62e82015-11-18 06:30:52 -08004978
Eric Dumazet2a028ec2015-11-18 06:30:53 -08004979 rcu_read_lock();
Eric Dumazet02d62e82015-11-18 06:30:52 -08004980
4981 napi = napi_by_id(sk->sk_napi_id);
4982 if (!napi)
4983 goto out;
4984
Eric Dumazet217f6972016-11-15 10:15:11 -08004985 preempt_disable();
4986 for (;;) {
Eric Dumazetce6aea92015-11-18 06:30:54 -08004987 rc = 0;
Eric Dumazet2a028ec2015-11-18 06:30:53 -08004988 local_bh_disable();
Eric Dumazet217f6972016-11-15 10:15:11 -08004989 if (!napi_poll) {
4990 unsigned long val = READ_ONCE(napi->state);
4991
4992 /* If multiple threads are competing for this napi,
4993 * we avoid dirtying napi->state as much as we can.
4994 */
4995 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
4996 NAPIF_STATE_IN_BUSY_POLL))
4997 goto count;
4998 if (cmpxchg(&napi->state, val,
4999 val | NAPIF_STATE_IN_BUSY_POLL |
5000 NAPIF_STATE_SCHED) != val)
5001 goto count;
5002 have_poll_lock = netpoll_poll_lock(napi);
5003 napi_poll = napi->poll;
5004 }
5005 rc = napi_poll(napi, BUSY_POLL_BUDGET);
5006 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
5007count:
Eric Dumazet2a028ec2015-11-18 06:30:53 -08005008 if (rc > 0)
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07005009 __NET_ADD_STATS(sock_net(sk),
5010 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
Eric Dumazet2a028ec2015-11-18 06:30:53 -08005011 local_bh_enable();
Eric Dumazet02d62e82015-11-18 06:30:52 -08005012
5013 if (rc == LL_FLUSH_FAILED)
5014 break; /* permanent failure */
5015
Eric Dumazet217f6972016-11-15 10:15:11 -08005016 if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) ||
5017 busy_loop_timeout(end_time))
5018 break;
Eric Dumazet02d62e82015-11-18 06:30:52 -08005019
Eric Dumazet217f6972016-11-15 10:15:11 -08005020 if (unlikely(need_resched())) {
5021 if (napi_poll)
5022 busy_poll_stop(napi, have_poll_lock);
5023 preempt_enable();
5024 rcu_read_unlock();
5025 cond_resched();
5026 rc = !skb_queue_empty(&sk->sk_receive_queue);
5027 if (rc || busy_loop_timeout(end_time))
5028 return rc;
5029 goto restart;
5030 }
Linus Torvalds6cdf89b2016-12-12 10:48:02 -08005031 cpu_relax();
Eric Dumazet217f6972016-11-15 10:15:11 -08005032 }
5033 if (napi_poll)
5034 busy_poll_stop(napi, have_poll_lock);
5035 preempt_enable();
Eric Dumazet02d62e82015-11-18 06:30:52 -08005036 rc = !skb_queue_empty(&sk->sk_receive_queue);
5037out:
Eric Dumazet2a028ec2015-11-18 06:30:53 -08005038 rcu_read_unlock();
Eric Dumazet02d62e82015-11-18 06:30:52 -08005039 return rc;
5040}
5041EXPORT_SYMBOL(sk_busy_loop);
5042
5043#endif /* CONFIG_NET_RX_BUSY_POLL */

static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS+1 range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < NR_CPUS + 1))
			napi_gen_id = NR_CPUS + 1;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}

/* Warning: the caller is responsible for making sure an RCU grace period
 * has elapsed before freeing the memory containing @napi.
 */
bool napi_hash_del(struct napi_struct *napi)
{
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	}
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
}
EXPORT_SYMBOL_GPL(napi_hash_del);

static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);
	if (napi->gro_list)
		napi_schedule(napi);

	return HRTIMER_NORESTART;
}

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
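
/* Driver-side sketch (assumed names, not from this file): the poll
 * callback registered here must honour the budget contract checked in
 * napi_poll() below; my_priv and my_clean_rx() are hypothetical.
 *
 *	static int my_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int done = my_clean_rx(priv, budget);	// hypothetical RX helper
 *
 *		if (done < budget)
 *			napi_complete(napi);	// under budget: polling is done
 *		return done;
 *	}
 *
 *	netif_napi_add(netdev, &priv->napi, my_napi_poll, NAPI_POLL_WEIGHT);
 */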

void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);
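
/* Usual pairing (sketch under an assumed driver shape): napi_disable()
 * in the stop path, napi_enable() again in the open path before RX
 * interrupts are re-armed.
 *
 *	static int my_ndo_stop(struct net_device *netdev)
 *	{
 *		struct my_priv *priv = netdev_priv(netdev);
 *
 *		napi_disable(&priv->napi);	// may msleep(); process context only
 *		return 0;
 *	}
 */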

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	might_sleep();
	if (napi_hash_del(napi))
		synchronize_net();
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi(). Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call. Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight. In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* Flush packets that are too old.
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}

static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				goto out;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
out:
	__kfree_skb_flush();
}

struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
{
	struct net_device *dev = data;

	return upper_dev == dev;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this walks the complete stack of upper
 * devices, not only the immediate one. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					     upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
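
/* Usage sketch (assumed caller): reject a stacking request when the
 * candidate is already somewhere above @dev, e.g. while validating a
 * NETDEV_PRECHANGEUPPER event; rtnl_lock() is already held there.
 *
 *	if (netdev_has_upper_dev(dev, candidate_upper))
 *		return -EBUSY;
 */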

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks the entire upper device chain.
 * The caller must hold the RCU read lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					       upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);

static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *udev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.upper,
	     udev = netdev_next_upper_dev_rcu(dev, &iter);
	     udev;
	     udev = netdev_next_upper_dev_rcu(dev, &iter)) {
		/* first is the upper device itself */
		ret = fn(udev, data);
		if (ret)
			return ret;

		/* then look at all of its upper devices */
		ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
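
/* Illustrative walker (callback names are assumptions): count the
 * devices stacked above @dev. A device reachable through several paths
 * is visited once per path, since the walk does not deduplicate.
 *
 *	static int my_count_upper(struct net_device *upper, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 0;	// a non-zero return stops the walk early
 *	}
 *
 *	unsigned int count = 0;
 *
 *	rcu_read_lock();
 *	netdev_walk_all_upper_dev_rcu(dev, my_count_upper, &count);
 *	rcu_read_unlock();
 */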

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or provide its own locking that guarantees that the neighbour
 * lower list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);
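
/* Typical consumer (sketch): the netdev_for_each_lower_dev() macro from
 * netdevice.h wraps this helper, so callers rarely invoke it directly.
 *
 *	struct net_device *ldev;
 *	struct list_head *iter;
 *
 *	netdev_for_each_lower_dev(dev, ldev, iter)
 *		pr_info("%s: lower device %s\n", dev->name, ldev->name);
 *
 * Runs under rtnl_lock() (or equivalent private locking), matching the
 * requirement documented above.
 */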

static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					void *data),
			      void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev_rcu(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
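
/* Fast-path sketch (assumed context): resolve a port's master without
 * taking RTNL, e.g. to find the bond or bridge above a receiving slave.
 *
 *	struct net_device *master;
 *
 *	rcu_read_lock();
 *	master = netdev_master_upper_dev_get_rcu(slave_dev);
 *	if (master)
 *		pr_debug("%s is enslaved to %s\n",
 *			 slave_dev->name, master->name);
 *	rcu_read_unlock();
 */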

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ + 7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ + 7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}
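
/* Resulting sysfs layout (illustrative device names): linking eth0 below
 * bond0 creates one symlink on each side,
 *
 *	/sys/class/net/bond0/lower_eth0 -> .../eth0
 *	/sys/class/net/eth0/upper_bond0 -> .../bond0
 *
 * The fixed "upper_"/"lower_" prefix is what the extra bytes in
 * linkname (IFNAMSIZ + 7) accommodate.
 */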

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr += 1;
		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);

		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info)
{
	struct netdev_notifier_changeupper_info changeupper_info;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check that dev is not an upper device of
	 * upper_dev.
	 */
	if (netdev_has_upper_dev(upper_dev, dev))
		return -EBUSY;

	if (netdev_has_upper_dev(dev, upper_dev))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = master;
	changeupper_info.linking = true;
	changeupper_info.upper_info = upper_info;

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback;

	return 0;

rollback:
	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
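
/* Sketch of a non-master user (assumed VLAN-style caller): the real
 * device becomes the lower side, the virtual one the upper side, and
 * the link must be undone on the teardown path.
 *
 *	err = netdev_upper_dev_link(real_dev, vlan_dev);
 *	if (err)
 *		return err;
 *
 *	// later, also under rtnl_lock():
 *	netdev_upper_dev_unlink(real_dev, vlan_dev);
 */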

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info)
{
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info;

	ASSERT_RTNL();

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
	changeupper_info.linking = false;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
				      &changeupper_info.info);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info;

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);

static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;
	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;
	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;
	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

int dev_get_nest_level(struct net_device *dev)
{
	struct net_device *lower = NULL;
	struct list_head *iter;
	int max_nest = -1;
	int nest;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower);
		if (max_nest < nest)
			max_nest = nest;
	}

	return max_nest + 1;
}
EXPORT_SYMBOL(dev_get_nest_level);
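
/* Worked example of the recursion (topology is assumed): for the stack
 * vlan0 -> bond0 -> {eth0, eth1}, where eth0/eth1 have no lower devices,
 *
 *	dev_get_nest_level(eth0)  == 0	// max_nest stays -1, returns -1 + 1
 *	dev_get_nest_level(bond0) == 1	// deepest lower sits at level 0
 *	dev_get_nest_level(vlan0) == 2	// deepest lower sits at level 1
 *
 * i.e. the result is the depth of the deepest chain of lower devices.
 */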

/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{
	struct netdev_notifier_changelowerstate_info changelowerstate_info;

	ASSERT_RTNL();
	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
				      &changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
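
/* Usage sketch (assumed capture-style caller): bump the counter on
 * attach and drop it on detach, so independent users compose correctly.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	// attach: 0 -> 1 enables promisc
 *	rtnl_unlock();
 *
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);		// detach: 1 -> 0 disables it
 *	rtnl_unlock();
 */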

static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
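
/* Sketch (assumed bonding-style master): mirror an allmulti change on
 * the master down to a slave, keeping the per-device counter balanced.
 *
 *	if (change & IFF_ALLMULTI)
 *		dev_set_allmulti(slave_dev,
 *				 dev->flags & IFF_ALLMULTI ? 1 : -1);
 */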

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags & IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast address changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006324
Patrick McHardybd380812010-02-26 06:34:53 +00006325int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006326{
Eric Dumazetb536db92011-11-30 21:42:26 +00006327 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00006328 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006329
Patrick McHardy24023452007-07-14 18:51:31 -07006330 ASSERT_RTNL();
6331
Linus Torvalds1da177e2005-04-16 15:20:36 -07006332 /*
6333 * Set the flags on our device.
6334 */
6335
6336 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6337 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6338 IFF_AUTOMEDIA)) |
6339 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6340 IFF_ALLMULTI));
6341
6342 /*
6343 * Load in the correct multicast list now the flags have changed.
6344 */
6345
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006346 if ((old_flags ^ flags) & IFF_MULTICAST)
6347 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07006348
Patrick McHardy4417da62007-06-27 01:28:10 -07006349 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006350
6351 /*
6352 * Have we downed the interface? We handle IFF_UP ourselves
6353 * according to user attempts to set it, rather than blindly
6354 * setting it.
6355 */
6356
6357 ret = 0;
Peter Pan(潘卫平)d215d102014-06-16 21:57:22 +08006358 if ((old_flags ^ flags) & IFF_UP)
Patrick McHardybd380812010-02-26 06:34:53 +00006359 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006360
Linus Torvalds1da177e2005-04-16 15:20:36 -07006361 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006362 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006363 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006364
Linus Torvalds1da177e2005-04-16 15:20:36 -07006365 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006366
6367 if (__dev_set_promiscuity(dev, inc, false) >= 0)
6368 if (dev->flags != old_flags)
6369 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006370 }
6371
6372 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
6373 is important. Some (broken) drivers set IFF_PROMISC when
6374 IFF_ALLMULTI is requested, without asking us and without reporting it.
6375 */
6376 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006377 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6378
Linus Torvalds1da177e2005-04-16 15:20:36 -07006379 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006380 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006381 }
6382
Patrick McHardybd380812010-02-26 06:34:53 +00006383 return ret;
6384}
6385
Nicolas Dichtela528c212013-09-25 12:02:44 +02006386void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6387 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00006388{
6389 unsigned int changes = dev->flags ^ old_flags;
6390
Nicolas Dichtela528c212013-09-25 12:02:44 +02006391 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006392 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02006393
Patrick McHardybd380812010-02-26 06:34:53 +00006394 if (changes & IFF_UP) {
6395 if (dev->flags & IFF_UP)
6396 call_netdevice_notifiers(NETDEV_UP, dev);
6397 else
6398 call_netdevice_notifiers(NETDEV_DOWN, dev);
6399 }
6400
6401 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00006402 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
6403 struct netdev_notifier_change_info change_info;
6404
6405 change_info.flags_changed = changes;
6406 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
6407 &change_info.info);
6408 }
Patrick McHardybd380812010-02-26 06:34:53 +00006409}
6410
6411/**
6412 * dev_change_flags - change device settings
6413 * @dev: device
6414 * @flags: device state flags
6415 *
6416 * Change settings on device based on state flags. The flags are
6417 * in the userspace exported format.
6418 */
Eric Dumazetb536db92011-11-30 21:42:26 +00006419int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00006420{
Eric Dumazetb536db92011-11-30 21:42:26 +00006421 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006422 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00006423
6424 ret = __dev_change_flags(dev, flags);
6425 if (ret < 0)
6426 return ret;
6427
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006428 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02006429 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006430 return ret;
6431}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006432EXPORT_SYMBOL(dev_change_flags);
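
/* Illustrative sketch (not part of this file): requesting promiscuous
 * mode through the flags interface. RTNL must be held around the
 * read-modify-write; in-kernel users that only want the refcounted
 * behaviour would normally call dev_set_promiscuity() instead.
 */
#if 0
static int example_set_promisc(struct net_device *dev, bool on)
{
        unsigned int flags;
        int err;

        rtnl_lock();
        flags = dev_get_flags(dev);
        if (on)
                flags |= IFF_PROMISC;
        else
                flags &= ~IFF_PROMISC;
        err = dev_change_flags(dev, flags);
        rtnl_unlock();
        return err;
}
#endif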
Linus Torvalds1da177e2005-04-16 15:20:36 -07006433
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006434static int __dev_set_mtu(struct net_device *dev, int new_mtu)
6435{
6436 const struct net_device_ops *ops = dev->netdev_ops;
6437
6438 if (ops->ndo_change_mtu)
6439 return ops->ndo_change_mtu(dev, new_mtu);
6440
6441 dev->mtu = new_mtu;
6442 return 0;
6443}
6444
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006445/**
6446 * dev_set_mtu - Change maximum transfer unit
6447 * @dev: device
6448 * @new_mtu: new transfer unit
6449 *
6450 * Change the maximum transfer size of the network device.
6451 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006452int dev_set_mtu(struct net_device *dev, int new_mtu)
6453{
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006454 int err, orig_mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006455
6456 if (new_mtu == dev->mtu)
6457 return 0;
6458
Jarod Wilson61e84622016-10-07 22:04:33 -04006459 /* MTU must be positive, and in range */
6460 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
6461 net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
6462 dev->name, new_mtu, dev->min_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006463 return -EINVAL;
Jarod Wilson61e84622016-10-07 22:04:33 -04006464 }
6465
6466 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
6467 net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
Jakub Kicinskia0e65de2016-10-17 18:02:22 +01006468 dev->name, new_mtu, dev->max_mtu);
Jarod Wilson61e84622016-10-07 22:04:33 -04006469 return -EINVAL;
6470 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006471
6472 if (!netif_device_present(dev))
6473 return -ENODEV;
6474
Veaceslav Falico1d486bf2014-01-16 00:02:18 +01006475 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6476 err = notifier_to_errno(err);
6477 if (err)
6478 return err;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006479
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006480 orig_mtu = dev->mtu;
6481 err = __dev_set_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006482
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006483 if (!err) {
6484 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6485 err = notifier_to_errno(err);
6486 if (err) {
6487 /* setting mtu back and notifying everyone again,
6488 * so that they have a chance to revert changes.
6489 */
6490 __dev_set_mtu(dev, orig_mtu);
6491 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6492 }
6493 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006494 return err;
6495}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006496EXPORT_SYMBOL(dev_set_mtu);
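
/* Illustrative sketch (not part of this file): changing the MTU from
 * kernel code. dev_set_mtu() sends notifier events, so RTNL must be
 * held; the value 1400 is an arbitrary example.
 */
#if 0
static int example_shrink_mtu(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_set_mtu(dev, 1400);
        rtnl_unlock();
        return err;
}
#endif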
Linus Torvalds1da177e2005-04-16 15:20:36 -07006497
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006498/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006499 * dev_set_group - Change group this device belongs to
6500 * @dev: device
6501 * @new_group: group this device should belong to
6502 */
6503void dev_set_group(struct net_device *dev, int new_group)
6504{
6505 dev->group = new_group;
6506}
6507EXPORT_SYMBOL(dev_set_group);
6508
6509/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006510 * dev_set_mac_address - Change Media Access Control Address
6511 * @dev: device
6512 * @sa: new address
6513 *
6514 * Change the hardware (MAC) address of the device
6515 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006516int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6517{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006518 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006519 int err;
6520
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006521 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006522 return -EOPNOTSUPP;
6523 if (sa->sa_family != dev->type)
6524 return -EINVAL;
6525 if (!netif_device_present(dev))
6526 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006527 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00006528 if (err)
6529 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00006530 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00006531 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006532 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00006533 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006534}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006535EXPORT_SYMBOL(dev_set_mac_address);
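
/* Illustrative sketch (not part of this file): setting a MAC address
 * from kernel code. The address is a locally administered example
 * value; sa_family must match dev->type, and RTNL must be held.
 */
#if 0
static int example_set_mac(struct net_device *dev)
{
        static const u8 addr[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
        struct sockaddr sa;
        int err;

        sa.sa_family = dev->type;       /* ARPHRD_ETHER for Ethernet */
        memcpy(sa.sa_data, addr, ETH_ALEN);
        rtnl_lock();
        err = dev_set_mac_address(dev, &sa);
        rtnl_unlock();
        return err;
}
#endif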
Linus Torvalds1da177e2005-04-16 15:20:36 -07006536
Jiri Pirko4bf84c32012-12-27 23:49:37 +00006537/**
6538 * dev_change_carrier - Change device carrier
6539 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00006540 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00006541 *
6542 * Change device carrier
6543 */
6544int dev_change_carrier(struct net_device *dev, bool new_carrier)
6545{
6546 const struct net_device_ops *ops = dev->netdev_ops;
6547
6548 if (!ops->ndo_change_carrier)
6549 return -EOPNOTSUPP;
6550 if (!netif_device_present(dev))
6551 return -ENODEV;
6552 return ops->ndo_change_carrier(dev, new_carrier);
6553}
6554EXPORT_SYMBOL(dev_change_carrier);
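
/* Illustrative sketch (not part of this file): the driver half of the
 * call above. For a software device, ndo_change_carrier typically just
 * forwards to the carrier helpers, as the dummy-style drivers do.
 */
#if 0
static int example_ndo_change_carrier(struct net_device *dev,
                                      bool new_carrier)
{
        if (new_carrier)
                netif_carrier_on(dev);
        else
                netif_carrier_off(dev);
        return 0;
}
#endif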
6555
Linus Torvalds1da177e2005-04-16 15:20:36 -07006556/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02006557 * dev_get_phys_port_id - Get device physical port ID
6558 * @dev: device
6559 * @ppid: port ID
6560 *
6561 * Get device physical port ID
6562 */
6563int dev_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01006564 struct netdev_phys_item_id *ppid)
Jiri Pirko66b52b02013-07-29 18:16:49 +02006565{
6566 const struct net_device_ops *ops = dev->netdev_ops;
6567
6568 if (!ops->ndo_get_phys_port_id)
6569 return -EOPNOTSUPP;
6570 return ops->ndo_get_phys_port_id(dev, ppid);
6571}
6572EXPORT_SYMBOL(dev_get_phys_port_id);
6573
6574/**
David Aherndb24a902015-03-17 20:23:15 -06006575 * dev_get_phys_port_name - Get device physical port name
6576 * @dev: device
6577 * @name: port name
Luis de Bethencourted49e652016-03-21 16:31:14 +00006578 * @len: limit of bytes to copy to name
David Aherndb24a902015-03-17 20:23:15 -06006579 *
6580 * Get device physical port name
6581 */
6582int dev_get_phys_port_name(struct net_device *dev,
6583 char *name, size_t len)
6584{
6585 const struct net_device_ops *ops = dev->netdev_ops;
6586
6587 if (!ops->ndo_get_phys_port_name)
6588 return -EOPNOTSUPP;
6589 return ops->ndo_get_phys_port_name(dev, name, len);
6590}
6591EXPORT_SYMBOL(dev_get_phys_port_name);
6592
6593/**
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07006594 * dev_change_proto_down - update protocol port state information
6595 * @dev: device
6596 * @proto_down: new value
6597 *
6598 * This info can be used by switch drivers to set the phys state of the
6599 * port.
6600 */
6601int dev_change_proto_down(struct net_device *dev, bool proto_down)
6602{
6603 const struct net_device_ops *ops = dev->netdev_ops;
6604
6605 if (!ops->ndo_change_proto_down)
6606 return -EOPNOTSUPP;
6607 if (!netif_device_present(dev))
6608 return -ENODEV;
6609 return ops->ndo_change_proto_down(dev, proto_down);
6610}
6611EXPORT_SYMBOL(dev_change_proto_down);
6612
6613/**
Brenden Blancoa7862b42016-07-19 12:16:48 -07006614 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
6615 * @dev: device
6616 * @fd: new program fd or negative value to clear
Daniel Borkmann85de8572016-11-28 23:16:54 +01006617 * @flags: xdp-related flags
Brenden Blancoa7862b42016-07-19 12:16:48 -07006618 *
6619 * Set or clear a bpf program for a device
6620 */
Daniel Borkmann85de8572016-11-28 23:16:54 +01006621int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags)
Brenden Blancoa7862b42016-07-19 12:16:48 -07006622{
6623 const struct net_device_ops *ops = dev->netdev_ops;
6624 struct bpf_prog *prog = NULL;
Daniel Borkmann85de8572016-11-28 23:16:54 +01006625 struct netdev_xdp xdp;
Brenden Blancoa7862b42016-07-19 12:16:48 -07006626 int err;
6627
Daniel Borkmann85de8572016-11-28 23:16:54 +01006628 ASSERT_RTNL();
6629
Brenden Blancoa7862b42016-07-19 12:16:48 -07006630 if (!ops->ndo_xdp)
6631 return -EOPNOTSUPP;
6632 if (fd >= 0) {
Daniel Borkmann85de8572016-11-28 23:16:54 +01006633 if (flags & XDP_FLAGS_UPDATE_IF_NOEXIST) {
6634 memset(&xdp, 0, sizeof(xdp));
6635 xdp.command = XDP_QUERY_PROG;
6636
6637 err = ops->ndo_xdp(dev, &xdp);
6638 if (err < 0)
6639 return err;
6640 if (xdp.prog_attached)
6641 return -EBUSY;
6642 }
6643
Brenden Blancoa7862b42016-07-19 12:16:48 -07006644 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
6645 if (IS_ERR(prog))
6646 return PTR_ERR(prog);
6647 }
6648
Daniel Borkmann85de8572016-11-28 23:16:54 +01006649 memset(&xdp, 0, sizeof(xdp));
Brenden Blancoa7862b42016-07-19 12:16:48 -07006650 xdp.command = XDP_SETUP_PROG;
6651 xdp.prog = prog;
Daniel Borkmann85de8572016-11-28 23:16:54 +01006652
Brenden Blancoa7862b42016-07-19 12:16:48 -07006653 err = ops->ndo_xdp(dev, &xdp);
6654 if (err < 0 && prog)
6655 bpf_prog_put(prog);
6656
6657 return err;
6658}
6659EXPORT_SYMBOL(dev_change_xdp_fd);
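
/* Illustrative sketch (not part of this file): the driver half of the
 * call above. A minimal ndo_xdp only has to service XDP_SETUP_PROG and
 * XDP_QUERY_PROG; the private structure and its fields are assumptions,
 * and a real driver would also reconfigure its receive rings here.
 */
#if 0
struct example_priv {                   /* hypothetical driver state */
        struct bpf_prog *xdp_prog;
};

static int example_ndo_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
        struct example_priv *priv = netdev_priv(dev);
        struct bpf_prog *old;

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                old = xchg(&priv->xdp_prog, xdp->prog);
                if (old)
                        bpf_prog_put(old);  /* drop ref on the old program */
                return 0;
        case XDP_QUERY_PROG:
                xdp->prog_attached = !!priv->xdp_prog;
                return 0;
        default:
                return -EINVAL;
        }
}
#endif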
6660
6661/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006662 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07006663 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07006664 *
6665 * Returns a suitable unique value for a new device interface
6666 * number. The caller must hold the rtnl semaphore or the
6667 * dev_base_lock to be sure it remains unique.
6668 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07006669static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006670{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00006671 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006672 for (;;) {
6673 if (++ifindex <= 0)
6674 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006675 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00006676 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006677 }
6678}
6679
Linus Torvalds1da177e2005-04-16 15:20:36 -07006680/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08006681static LIST_HEAD(net_todo_list);
Cong Wang200b9162014-05-12 15:11:20 -07006682DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006683
Stephen Hemminger6f05f622007-03-08 20:46:03 -08006684static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006685{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006686 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07006687 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006688}
6689
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006690static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006691{
Krishna Kumare93737b2009-12-08 22:26:02 +00006692 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006693 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006694
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006695 BUG_ON(dev_boot_phase);
6696 ASSERT_RTNL();
6697
Krishna Kumare93737b2009-12-08 22:26:02 +00006698 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006699 /* Some devices call this without ever having registered,
Krishna Kumare93737b2009-12-08 22:26:02 +00006700 * to unwind a failed initialization. Remove those
6701 * devices and proceed with the remaining ones.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006702 */
6703 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006704 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6705 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006706
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006707 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00006708 list_del(&dev->unreg_list);
6709 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006710 }
Eric Dumazet449f4542011-05-19 12:24:16 +00006711 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006712 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00006713 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006714
Octavian Purdila44345722010-12-13 12:44:07 +00006715 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006716 list_for_each_entry(dev, head, unreg_list)
6717 list_add_tail(&dev->close_list, &close_head);
David S. Miller99c4a262015-03-18 22:52:33 -04006718 dev_close_many(&close_head, true);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006719
Octavian Purdila44345722010-12-13 12:44:07 +00006720 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006721 /* And unlink it from device chain. */
6722 unlist_netdevice(dev);
6723
6724 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006725 }
Eric Dumazet41852492016-08-26 12:50:39 -07006726 flush_all_backlogs();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006727
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006728 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006729
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006730 list_for_each_entry(dev, head, unreg_list) {
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006731 struct sk_buff *skb = NULL;
6732
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006733 /* Shutdown queueing discipline. */
6734 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006735
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006737 /* Notify protocols that we are about to destroy
6738 this device. They should clean up all of their state.
6739 */
6740 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6741
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006742 if (!dev->rtnl_link_ops ||
6743 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6744 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6745 GFP_KERNEL);
6746
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006747 /*
6748 * Flush the unicast and multicast chains
6749 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006750 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006751 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006752
6753 if (dev->netdev_ops->ndo_uninit)
6754 dev->netdev_ops->ndo_uninit(dev);
6755
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006756 if (skb)
6757 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
Roopa Prabhu56bfa7e2014-05-01 11:40:30 -07006758
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006759 /* Notifier chain MUST detach us all upper devices. */
6760 WARN_ON(netdev_has_any_upper_dev(dev));
David Ahern0f524a82016-10-17 19:15:52 -07006761 WARN_ON(netdev_has_any_lower_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006762
6763 /* Remove entries from kobject tree */
6764 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00006765#ifdef CONFIG_XPS
6766 /* Remove XPS queueing entries */
6767 netif_reset_xps_queues_gt(dev, 0);
6768#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006769 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006770
Eric W. Biederman850a5452011-10-13 22:25:23 +00006771 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006772
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00006773 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006774 dev_put(dev);
6775}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006776
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006777static void rollback_registered(struct net_device *dev)
6778{
6779 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006780
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006781 list_add(&dev->unreg_list, &single);
6782 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00006783 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006784}
6785
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006786static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
6787 struct net_device *upper, netdev_features_t features)
6788{
6789 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6790 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05006791 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006792
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05006793 for_each_netdev_feature(&upper_disables, feature_bit) {
6794 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006795 if (!(upper->wanted_features & feature)
6796 && (features & feature)) {
6797 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
6798 &feature, upper->name);
6799 features &= ~feature;
6800 }
6801 }
6802
6803 return features;
6804}
6805
6806static void netdev_sync_lower_features(struct net_device *upper,
6807 struct net_device *lower, netdev_features_t features)
6808{
6809 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6810 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05006811 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006812
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05006813 for_each_netdev_feature(&upper_disables, feature_bit) {
6814 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006815 if (!(features & feature) && (lower->features & feature)) {
6816 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
6817 &feature, lower->name);
6818 lower->wanted_features &= ~feature;
6819 netdev_update_features(lower);
6820
6821 if (unlikely(lower->features & feature))
6822 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
6823 &feature, lower->name);
6824 }
6825 }
6826}
6827
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006828static netdev_features_t netdev_fix_features(struct net_device *dev,
6829 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07006830{
Michał Mirosław57422dc2011-01-22 12:14:12 +00006831 /* Fix illegal checksum combinations */
6832 if ((features & NETIF_F_HW_CSUM) &&
6833 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006834 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00006835 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6836 }
6837
Herbert Xub63365a2008-10-23 01:11:29 -07006838 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00006839 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006840 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00006841 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07006842 }
6843
Pravin B Shelarec5f0612013-03-07 09:28:01 +00006844 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6845 !(features & NETIF_F_IP_CSUM)) {
6846 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6847 features &= ~NETIF_F_TSO;
6848 features &= ~NETIF_F_TSO_ECN;
6849 }
6850
6851 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6852 !(features & NETIF_F_IPV6_CSUM)) {
6853 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6854 features &= ~NETIF_F_TSO6;
6855 }
6856
Alexander Duyckb1dc4972016-05-02 09:38:24 -07006857 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
6858 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
6859 features &= ~NETIF_F_TSO_MANGLEID;
6860
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00006861 /* TSO ECN requires that TSO is present as well. */
6862 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6863 features &= ~NETIF_F_TSO_ECN;
6864
Michał Mirosław212b5732011-02-15 16:59:16 +00006865 /* Software GSO depends on SG. */
6866 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006867 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00006868 features &= ~NETIF_F_GSO;
6869 }
6870
Michał Mirosławacd11302011-01-24 15:45:15 -08006871 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07006872 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00006873 /* maybe split UFO into V4 and V6? */
Tom Herbertc8cd0982015-12-14 11:19:44 -08006874 if (!(features & NETIF_F_HW_CSUM) &&
6875 ((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
6876 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006877 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08006878 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07006879 features &= ~NETIF_F_UFO;
6880 }
6881
6882 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006883 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08006884 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07006885 features &= ~NETIF_F_UFO;
6886 }
6887 }
6888
Alexander Duyck802ab552016-04-10 21:45:03 -04006889 /* GSO partial features require GSO partial be set */
6890 if ((features & dev->gso_partial_features) &&
6891 !(features & NETIF_F_GSO_PARTIAL)) {
6892 netdev_dbg(dev,
6893 "Dropping partially supported GSO features since no GSO partial.\n");
6894 features &= ~dev->gso_partial_features;
6895 }
6896
Herbert Xub63365a2008-10-23 01:11:29 -07006897 return features;
6898}
Herbert Xub63365a2008-10-23 01:11:29 -07006899
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006900int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00006901{
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006902 struct net_device *upper, *lower;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006903 netdev_features_t features;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006904 struct list_head *iter;
Jarod Wilsone7868a82015-11-03 23:09:32 -05006905 int err = -1;
Michał Mirosław5455c692011-02-15 16:59:17 +00006906
Michał Mirosław87267482011-04-12 09:56:38 +00006907 ASSERT_RTNL();
6908
Michał Mirosław5455c692011-02-15 16:59:17 +00006909 features = netdev_get_wanted_features(dev);
6910
6911 if (dev->netdev_ops->ndo_fix_features)
6912 features = dev->netdev_ops->ndo_fix_features(dev, features);
6913
6914 /* driver might be less strict about feature dependencies */
6915 features = netdev_fix_features(dev, features);
6916
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006917 /* some features can't be enabled if they're off on an upper device */
6918 netdev_for_each_upper_dev_rcu(dev, upper, iter)
6919 features = netdev_sync_upper_features(dev, upper, features);
6920
Michał Mirosław5455c692011-02-15 16:59:17 +00006921 if (dev->features == features)
Jarod Wilsone7868a82015-11-03 23:09:32 -05006922 goto sync_lower;
Michał Mirosław5455c692011-02-15 16:59:17 +00006923
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006924 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6925 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00006926
6927 if (dev->netdev_ops->ndo_set_features)
6928 err = dev->netdev_ops->ndo_set_features(dev, features);
Nikolay Aleksandrov5f8dc332015-11-13 14:54:01 +01006929 else
6930 err = 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00006931
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006932 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00006933 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006934 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6935 err, &features, &dev->features);
Nikolay Aleksandrov17b85d22015-11-17 15:49:06 +01006936 /* return non-0 since some features might have changed and
6937 * it's better to fire a spurious notification than miss it
6938 */
6939 return -1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006940 }
6941
Jarod Wilsone7868a82015-11-03 23:09:32 -05006942sync_lower:
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006943 /* some features must be disabled on lower devices when disabled
6944 * on an upper device (think: bonding master or bridge)
6945 */
6946 netdev_for_each_lower_dev(dev, lower, iter)
6947 netdev_sync_lower_features(dev, lower, features);
6948
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006949 if (!err)
6950 dev->features = features;
6951
Jarod Wilsone7868a82015-11-03 23:09:32 -05006952 return err < 0 ? 0 : 1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006953}
6954
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006955/**
6956 * netdev_update_features - recalculate device features
6957 * @dev: the device to check
6958 *
6959 * Recalculate dev->features set and send notifications if it
6960 * has changed. Should be called after driver- or hardware-dependent
6961 * conditions that influence the features might have changed.
6962 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006963void netdev_update_features(struct net_device *dev)
6964{
6965 if (__netdev_update_features(dev))
6966 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00006967}
6968EXPORT_SYMBOL(netdev_update_features);
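
/* Illustrative sketch (not part of this file): a driver reacting to a
 * hardware condition that invalidates a feature. After adjusting the
 * masks it lets __netdev_update_features() recompute dev->features and
 * send the notification if anything actually changed.
 */
#if 0
static void example_disable_tso(struct net_device *dev)
{
        rtnl_lock();
        dev->hw_features &= ~NETIF_F_ALL_TSO;
        dev->wanted_features &= ~NETIF_F_ALL_TSO;
        netdev_update_features(dev);
        rtnl_unlock();
}
#endif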
6969
Linus Torvalds1da177e2005-04-16 15:20:36 -07006970/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006971 * netdev_change_features - recalculate device features
6972 * @dev: the device to check
6973 *
6974 * Recalculate dev->features set and send notifications even
6975 * if they have not changed. Should be called instead of
6976 * netdev_update_features() if dev->vlan_features might also
6977 * have changed, to allow the changes to be propagated to stacked
6978 * VLAN devices.
6979 */
6980void netdev_change_features(struct net_device *dev)
6981{
6982 __netdev_update_features(dev);
6983 netdev_features_change(dev);
6984}
6985EXPORT_SYMBOL(netdev_change_features);
6986
6987/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006988 * netif_stacked_transfer_operstate - transfer operstate
6989 * @rootdev: the root or lower level device to transfer state from
6990 * @dev: the device to transfer operstate to
6991 *
6992 * Transfer operational state from root to device. This is normally
6993 * called when a stacking relationship exists between the root
6994 * device and the device (a leaf device).
6995 */
6996void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6997 struct net_device *dev)
6998{
6999 if (rootdev->operstate == IF_OPER_DORMANT)
7000 netif_dormant_on(dev);
7001 else
7002 netif_dormant_off(dev);
7003
7004 if (netif_carrier_ok(rootdev)) {
7005 if (!netif_carrier_ok(dev))
7006 netif_carrier_on(dev);
7007 } else {
7008 if (netif_carrier_ok(dev))
7009 netif_carrier_off(dev);
7010 }
7011}
7012EXPORT_SYMBOL(netif_stacked_transfer_operstate);
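
/* Illustrative sketch (not part of this file): a stacking driver
 * propagating lower-device state changes from its netdevice notifier.
 * example_find_upper() is a hypothetical lookup from a lower device to
 * the stacked device built on top of it.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *lower = netdev_notifier_info_to_dev(ptr);
        struct net_device *upper = example_find_upper(lower); /* hypothetical */

        if (upper && event == NETDEV_CHANGE)
                netif_stacked_transfer_operstate(lower, upper);
        return NOTIFY_DONE;
}
#endif
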
7013
Michael Daltona953be52014-01-16 22:23:28 -08007014#ifdef CONFIG_SYSFS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007015static int netif_alloc_rx_queues(struct net_device *dev)
7016{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007017 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00007018 struct netdev_rx_queue *rx;
Pankaj Gupta10595902015-01-12 11:41:28 +05307019 size_t sz = count * sizeof(*rx);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007020
Tom Herbertbd25fa72010-10-18 18:00:16 +00007021 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007022
Pankaj Gupta10595902015-01-12 11:41:28 +05307023 rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
7024 if (!rx) {
7025 rx = vzalloc(sz);
7026 if (!rx)
7027 return -ENOMEM;
7028 }
Tom Herbertbd25fa72010-10-18 18:00:16 +00007029 dev->_rx = rx;
7030
Tom Herbertbd25fa72010-10-18 18:00:16 +00007031 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00007032 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007033 return 0;
7034}
Tom Herbertbf264142010-11-26 08:36:09 +00007035#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007036
Changli Gaoaa942102010-12-04 02:31:41 +00007037static void netdev_init_one_queue(struct net_device *dev,
7038 struct netdev_queue *queue, void *_unused)
7039{
7040 /* Initialize queue lock */
7041 spin_lock_init(&queue->_xmit_lock);
7042 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
7043 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00007044 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00007045 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00007046#ifdef CONFIG_BQL
7047 dql_init(&queue->dql, HZ);
7048#endif
Changli Gaoaa942102010-12-04 02:31:41 +00007049}
7050
Eric Dumazet60877a32013-06-20 01:15:51 -07007051static void netif_free_tx_queues(struct net_device *dev)
7052{
WANG Cong4cb28972014-06-02 15:55:22 -07007053 kvfree(dev->_tx);
Eric Dumazet60877a32013-06-20 01:15:51 -07007054}
7055
Tom Herberte6484932010-10-18 18:04:39 +00007056static int netif_alloc_netdev_queues(struct net_device *dev)
7057{
7058 unsigned int count = dev->num_tx_queues;
7059 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07007060 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00007061
Eric Dumazetd3397272015-07-06 17:13:26 +02007062 if (count < 1 || count > 0xffff)
7063 return -EINVAL;
Tom Herberte6484932010-10-18 18:04:39 +00007064
Eric Dumazet60877a32013-06-20 01:15:51 -07007065 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
7066 if (!tx) {
7067 tx = vzalloc(sz);
7068 if (!tx)
7069 return -ENOMEM;
7070 }
Tom Herberte6484932010-10-18 18:04:39 +00007071 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00007072
Tom Herberte6484932010-10-18 18:04:39 +00007073 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
7074 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00007075
7076 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00007077}
7078
Denys Vlasenkoa2029242015-05-11 21:17:53 +02007079void netif_tx_stop_all_queues(struct net_device *dev)
7080{
7081 unsigned int i;
7082
7083 for (i = 0; i < dev->num_tx_queues; i++) {
7084 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
7085 netif_tx_stop_queue(txq);
7086 }
7087}
7088EXPORT_SYMBOL(netif_tx_stop_all_queues);
7089
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007090/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007091 * register_netdevice - register a network device
7092 * @dev: device to register
7093 *
7094 * Take a completed network device structure and add it to the kernel
7095 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7096 * chain. 0 is returned on success. A negative errno code is returned
7097 * on a failure to set up the device, or if the name is a duplicate.
7098 *
7099 * Callers must hold the rtnl semaphore. You may want
7100 * register_netdev() instead of this.
7101 *
7102 * BUGS:
7103 * The locking appears insufficient to guarantee two parallel registers
7104 * will not get the same name.
7105 */
7106
7107int register_netdevice(struct net_device *dev)
7108{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007109 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007110 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007111
7112 BUG_ON(dev_boot_phase);
7113 ASSERT_RTNL();
7114
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007115 might_sleep();
7116
Linus Torvalds1da177e2005-04-16 15:20:36 -07007117 /* When net_device's are persistent, this will be fatal. */
7118 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007119 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007120
David S. Millerf1f28aa2008-07-15 00:08:33 -07007121 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07007122 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007123
Gao feng828de4f2012-09-13 20:58:27 +00007124 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00007125 if (ret < 0)
7126 goto out;
7127
Linus Torvalds1da177e2005-04-16 15:20:36 -07007128 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007129 if (dev->netdev_ops->ndo_init) {
7130 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007131 if (ret) {
7132 if (ret > 0)
7133 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08007134 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007135 }
7136 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007137
Patrick McHardyf6469682013-04-19 02:04:27 +00007138 if (((dev->hw_features | dev->features) &
7139 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00007140 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
7141 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
7142 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
7143 ret = -EINVAL;
7144 goto err_uninit;
7145 }
7146
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00007147 ret = -EBUSY;
7148 if (!dev->ifindex)
7149 dev->ifindex = dev_new_index(net);
7150 else if (__dev_get_by_index(net, dev->ifindex))
7151 goto err_uninit;
7152
Michał Mirosław5455c692011-02-15 16:59:17 +00007153 /* Transfer changeable features to wanted_features and enable
7154 * software offloads (GSO and GRO).
7155 */
7156 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00007157 dev->features |= NETIF_F_SOFT_FEATURES;
7158 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007159
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007160 if (!(dev->flags & IFF_LOOPBACK))
Michał Mirosław34324dc2011-11-15 15:29:55 +00007161 dev->hw_features |= NETIF_F_NOCACHE_COPY;
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007162
Alexander Duyck7f348a62016-04-20 16:51:00 -04007163 /* If IPv4 TCP segmentation offload is supported we should also
7164 * allow the device to enable segmenting the frame with the option
7165 * of ignoring a static IP ID value. This doesn't enable the
7166 * feature itself but allows the user to enable it later.
7167 */
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007168 if (dev->hw_features & NETIF_F_TSO)
7169 dev->hw_features |= NETIF_F_TSO_MANGLEID;
Alexander Duyck7f348a62016-04-20 16:51:00 -04007170 if (dev->vlan_features & NETIF_F_TSO)
7171 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
7172 if (dev->mpls_features & NETIF_F_TSO)
7173 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
7174 if (dev->hw_enc_features & NETIF_F_TSO)
7175 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07007176
Michał Mirosław1180e7d2011-07-14 14:41:11 -07007177 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00007178 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07007179 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00007180
Pravin B Shelaree579672013-03-07 09:28:08 +00007181 /* Make NETIF_F_SG inheritable to tunnel devices.
7182 */
Alexander Duyck802ab552016-04-10 21:45:03 -04007183 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
Pravin B Shelaree579672013-03-07 09:28:08 +00007184
Simon Horman0d89d202013-05-23 21:02:52 +00007185 /* Make NETIF_F_SG inheritable to MPLS.
7186 */
7187 dev->mpls_features |= NETIF_F_SG;
7188
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00007189 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
7190 ret = notifier_to_errno(ret);
7191 if (ret)
7192 goto err_uninit;
7193
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007194 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007195 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007196 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007197 dev->reg_state = NETREG_REGISTERED;
7198
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007199 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00007200
Linus Torvalds1da177e2005-04-16 15:20:36 -07007201 /*
7202 * Default initial state at registry is that the
7203 * device is present.
7204 */
7205
7206 set_bit(__LINK_STATE_PRESENT, &dev->state);
7207
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01007208 linkwatch_init_dev(dev);
7209
Linus Torvalds1da177e2005-04-16 15:20:36 -07007210 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007211 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007212 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04007213 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007214
Jiri Pirko948b3372013-01-08 01:38:25 +00007215 /* If the device has permanent device address, driver should
7216 * set dev_addr and also addr_assign_type should be set to
7217 * NET_ADDR_PERM (default value).
7218 */
7219 if (dev->addr_assign_type == NET_ADDR_PERM)
7220 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7221
Linus Torvalds1da177e2005-04-16 15:20:36 -07007222 /* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007223 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07007224 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007225 if (ret) {
7226 rollback_registered(dev);
7227 dev->reg_state = NETREG_UNREGISTERED;
7228 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007229 /*
7230 * Prevent userspace races by waiting until the network
7231 * device is fully setup before sending notifications.
7232 */
Patrick McHardya2835762010-02-26 06:34:51 +00007233 if (!dev->rtnl_link_ops ||
7234 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007235 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007236
7237out:
7238 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007239
7240err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007241 if (dev->netdev_ops->ndo_uninit)
7242 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007243 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007244}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007245EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007246
7247/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007248 * init_dummy_netdev - init a dummy network device for NAPI
7249 * @dev: device to init
7250 *
7251 * This takes a network device structure and initializes the minimum
7252 * number of fields so it can be used to schedule NAPI polls without
7253 * registering a full blown interface. This is to be used by drivers
7254 * that need to tie several hardware interfaces to a single NAPI
7255 * poll scheduler due to HW limitations.
7256 */
7257int init_dummy_netdev(struct net_device *dev)
7258{
7259 /* Clear everything. Note we don't initialize spinlocks
7260 * as they aren't supposed to be taken by any of the
7261 * NAPI code and this dummy netdev is supposed to be
7262 * only ever used for NAPI polls.
7263 */
7264 memset(dev, 0, sizeof(struct net_device));
7265
7266 /* make sure we BUG if trying to hit standard
7267 * register/unregister code path
7268 */
7269 dev->reg_state = NETREG_DUMMY;
7270
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007271 /* NAPI wants this */
7272 INIT_LIST_HEAD(&dev->napi_list);
7273
7274 /* a dummy interface is started by default */
7275 set_bit(__LINK_STATE_PRESENT, &dev->state);
7276 set_bit(__LINK_STATE_START, &dev->state);
7277
Eric Dumazet29b44332010-10-11 10:22:12 +00007278 /* Note : We don't allocate pcpu_refcnt for dummy devices,
7279 * because users of this 'device' don't need to change
7280 * its refcount.
7281 */
7282
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007283 return 0;
7284}
7285EXPORT_SYMBOL_GPL(init_dummy_netdev);
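
/* Illustrative sketch (not part of this file): the intended use of
 * init_dummy_netdev(). A driver with several hardware channels but no
 * per-channel net_device can hang its NAPI context off one embedded
 * dummy device; the names, the weight of 64 and the poll body are
 * assumptions for the example.
 */
#if 0
struct example_hw {                     /* hypothetical driver state */
        struct net_device napi_dev;
        struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
        int done = 0;

        /* ... process up to @budget packets, counting them in done ... */
        if (done < budget)
                napi_complete_done(napi, done);
        return done;
}

static void example_napi_init(struct example_hw *hw)
{
        init_dummy_netdev(&hw->napi_dev);
        netif_napi_add(&hw->napi_dev, &hw->napi, example_poll, 64);
        napi_enable(&hw->napi);
}
#endif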
7286
7287
7288/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007289 * register_netdev - register a network device
7290 * @dev: device to register
7291 *
7292 * Take a completed network device structure and add it to the kernel
7293 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7294 * chain. 0 is returned on success. A negative errno code is returned
7295 * on a failure to set up the device, or if the name is a duplicate.
7296 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07007297 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07007298 * and expands the device name if you passed a format string to
7299 * alloc_netdev.
7300 */
7301int register_netdev(struct net_device *dev)
7302{
7303 int err;
7304
7305 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007306 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007307 rtnl_unlock();
7308 return err;
7309}
7310EXPORT_SYMBOL(register_netdev);
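
/* Illustrative sketch (not part of this file): the classic pairing of
 * alloc_netdev() and register_netdev() for a simple virtual device.
 * "example%d" lets the core pick a unit number; the setup callback and
 * function names are assumptions.
 */
#if 0
static void example_setup(struct net_device *dev)
{
        ether_setup(dev);               /* sane Ethernet defaults */
        dev->flags |= IFF_NOARP;
}

static struct net_device *example_create(void)
{
        struct net_device *dev;

        dev = alloc_netdev(0, "example%d", NET_NAME_UNKNOWN, example_setup);
        if (!dev)
                return NULL;
        if (register_netdev(dev)) {
                free_netdev(dev);
                return NULL;
        }
        return dev;
}
#endif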
7311
Eric Dumazet29b44332010-10-11 10:22:12 +00007312int netdev_refcnt_read(const struct net_device *dev)
7313{
7314 int i, refcnt = 0;
7315
7316 for_each_possible_cpu(i)
7317 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
7318 return refcnt;
7319}
7320EXPORT_SYMBOL(netdev_refcnt_read);
7321
Ben Hutchings2c530402012-07-10 10:55:09 +00007322/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007323 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00007324 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07007325 *
7326 * This is called when unregistering network devices.
7327 *
7328 * Any protocol or device that holds a reference should register
7329 * for netdevice notification, and clean up and put back the
7330 * reference if they receive an UNREGISTER event.
7331 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007332 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007333 */
7334static void netdev_wait_allrefs(struct net_device *dev)
7335{
7336 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00007337 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007338
Eric Dumazete014deb2009-11-17 05:59:21 +00007339 linkwatch_forget_dev(dev);
7340
Linus Torvalds1da177e2005-04-16 15:20:36 -07007341 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00007342 refcnt = netdev_refcnt_read(dev);
7343
7344 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007345 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08007346 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007347
7348 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007349 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007350
Eric Dumazet748e2d92012-08-22 21:50:59 +00007351 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007352 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00007353 rtnl_lock();
7354
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007355 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007356 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
7357 &dev->state)) {
7358 /* We must not have linkwatch events
7359 * pending on unregister. If this
7360 * happens, we simply run the queue
7361 * unscheduled, resulting in a noop
7362 * for this device.
7363 */
7364 linkwatch_run_queue();
7365 }
7366
Stephen Hemminger6756ae42006-03-20 22:23:58 -08007367 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007368
7369 rebroadcast_time = jiffies;
7370 }
7371
7372 msleep(250);
7373
Eric Dumazet29b44332010-10-11 10:22:12 +00007374 refcnt = netdev_refcnt_read(dev);
7375
Linus Torvalds1da177e2005-04-16 15:20:36 -07007376 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007377 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
7378 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007379 warning_time = jiffies;
7380 }
7381 }
7382}
7383
7384/* The sequence is:
7385 *
7386 * rtnl_lock();
7387 * ...
7388 * register_netdevice(x1);
7389 * register_netdevice(x2);
7390 * ...
7391 * unregister_netdevice(y1);
7392 * unregister_netdevice(y2);
7393 * ...
7394 * rtnl_unlock();
7395 * free_netdev(y1);
7396 * free_netdev(y2);
7397 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07007398 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07007399 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007400 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07007401 * without deadlocking with linkwatch via keventd.
7402 * 2) Since we run with the RTNL semaphore not held, we can sleep
7403 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07007404 *
7405 * We must not return until all unregister events added during
7406 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007407 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007408void netdev_run_todo(void)
7409{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007410 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007411
Linus Torvalds1da177e2005-04-16 15:20:36 -07007412 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007413 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07007414
7415 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007416
7418 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00007419 if (!list_empty(&list))
7420 rcu_barrier();
7421
Linus Torvalds1da177e2005-04-16 15:20:36 -07007422 while (!list_empty(&list)) {
7423 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00007424 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007425 list_del(&dev->todo_list);
7426
Eric Dumazet748e2d92012-08-22 21:50:59 +00007427 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007428 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00007429 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007430
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007431 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007432 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007433 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007434 dump_stack();
7435 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007436 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007437
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007438 dev->reg_state = NETREG_UNREGISTERED;
7439
7440 netdev_wait_allrefs(dev);
7441
7442 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00007443 BUG_ON(netdev_refcnt_read(dev));
Salam Noureddine7866a622015-01-27 11:35:48 -08007444 BUG_ON(!list_empty(&dev->ptype_all));
7445 BUG_ON(!list_empty(&dev->ptype_specific));
Eric Dumazet33d480c2011-08-11 19:30:52 +00007446 WARN_ON(rcu_access_pointer(dev->ip_ptr));
7447 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07007448 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007449
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007450 if (dev->destructor)
7451 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007452
Eric W. Biederman50624c92013-09-23 21:19:49 -07007453 /* Report a network device has been unregistered */
7454 rtnl_lock();
7455 dev_net(dev)->dev_unreg_count--;
7456 __rtnl_unlock();
7457 wake_up(&netdev_unregistering_wq);
7458
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007459 /* Free network device */
7460 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007461 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007462}
7463
Jarod Wilson92566452016-02-01 18:51:04 -05007464/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
7465 * all the same fields in the same order as net_device_stats, with only
7466 * the type differing, but rtnl_link_stats64 may have additional fields
7467 * at the end for newer counters.
Ben Hutchings3cfde792010-07-09 09:11:52 +00007468 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007469void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7470 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00007471{
7472#if BITS_PER_LONG == 64
Jarod Wilson92566452016-02-01 18:51:04 -05007473 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007474 memcpy(stats64, netdev_stats, sizeof(*stats64));
Jarod Wilson92566452016-02-01 18:51:04 -05007475 /* zero out counters that only exist in rtnl_link_stats64 */
7476 memset((char *)stats64 + sizeof(*netdev_stats), 0,
7477 sizeof(*stats64) - sizeof(*netdev_stats));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007478#else
Jarod Wilson92566452016-02-01 18:51:04 -05007479 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
Ben Hutchings3cfde792010-07-09 09:11:52 +00007480 const unsigned long *src = (const unsigned long *)netdev_stats;
7481 u64 *dst = (u64 *)stats64;
7482
Jarod Wilson92566452016-02-01 18:51:04 -05007483 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007484 for (i = 0; i < n; i++)
7485 dst[i] = src[i];
Jarod Wilson92566452016-02-01 18:51:04 -05007486 /* zero out counters that only exist in rtnl_link_stats64 */
7487 memset((char *)stats64 + n * sizeof(u64), 0,
7488 sizeof(*stats64) - n * sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007489#endif
7490}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007491EXPORT_SYMBOL(netdev_stats_to_stats64);
Ben Hutchings3cfde792010-07-09 09:11:52 +00007492
Eric Dumazetd83345a2009-11-16 03:36:51 +00007493/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007494 * dev_get_stats - get network device statistics
7495 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07007496 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007497 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00007498 * Get network statistics from device. Return @storage.
7499 * The device driver may provide its own method by setting
7500 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
7501 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007502 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00007503struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7504 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00007505{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007506 const struct net_device_ops *ops = dev->netdev_ops;
7507
Eric Dumazet28172732010-07-07 14:58:56 -07007508 if (ops->ndo_get_stats64) {
7509 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007510 ops->ndo_get_stats64(dev, storage);
7511 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00007512 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007513 } else {
7514 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07007515 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007516 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet015f0682014-03-27 08:45:56 -07007517 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
Jarod Wilson6e7333d2016-02-01 18:51:05 -05007518 storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
Eric Dumazet28172732010-07-07 14:58:56 -07007519 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07007520}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007521EXPORT_SYMBOL(dev_get_stats);
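
/* Illustrative sketch (not part of this file): a driver-side
 * ndo_get_stats64 that folds per-cpu counters into @stats, which
 * dev_get_stats() has already zeroed. The per-cpu structure, the priv
 * layout and all names are assumptions.
 */
#if 0
struct example_pcpu_stats {             /* hypothetical per-cpu counters */
        u64 rx_packets;
        u64 rx_bytes;
        struct u64_stats_sync syncp;
};

struct example_priv {                   /* hypothetical driver private data */
        struct example_pcpu_stats __percpu *stats;
};

static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct example_priv *priv = netdev_priv(dev);
        int cpu;

        for_each_possible_cpu(cpu) {
                const struct example_pcpu_stats *p;
                unsigned int start;
                u64 packets, bytes;

                p = per_cpu_ptr(priv->stats, cpu);
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->rx_packets;
                        bytes = p->rx_bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
                stats->rx_packets += packets;
                stats->rx_bytes += bytes;
        }
        return stats;
}
#endif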
Rusty Russellc45d2862007-03-28 14:29:08 -07007522
Eric Dumazet24824a02010-10-02 06:11:55 +00007523struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07007524{
Eric Dumazet24824a02010-10-02 06:11:55 +00007525 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07007526
Eric Dumazet24824a02010-10-02 06:11:55 +00007527#ifdef CONFIG_NET_CLS_ACT
7528 if (queue)
7529 return queue;
7530 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7531 if (!queue)
7532 return NULL;
7533 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08007534 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
Eric Dumazet24824a02010-10-02 06:11:55 +00007535 queue->qdisc_sleeping = &noop_qdisc;
7536 rcu_assign_pointer(dev->ingress_queue, queue);
7537#endif
7538 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07007539}
7540
Eric Dumazet2c60db02012-09-16 09:17:26 +00007541static const struct ethtool_ops default_ethtool_ops;
7542
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00007543void netdev_set_default_ethtool_ops(struct net_device *dev,
7544 const struct ethtool_ops *ops)
7545{
7546 if (dev->ethtool_ops == &default_ethtool_ops)
7547 dev->ethtool_ops = ops;
7548}
7549EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
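
/* Editor's illustrative sketch, not part of the original file: a bus
 * layer installing fallback ethtool ops for drivers that left
 * dev->ethtool_ops unset. example_* identifiers are assumptions;
 * ethtool_op_get_link() is the stock helper from <linux/ethtool.h>.
 */
static const struct ethtool_ops example_fallback_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
};

static void example_install_fallback_ethtool(struct net_device *dev)
{
	/* a no-op if the driver already chose its own ops */
	netdev_set_default_ethtool_ops(dev, &example_fallback_ethtool_ops);
}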
7550
Eric Dumazet74d332c2013-10-30 13:10:44 -07007551void netdev_freemem(struct net_device *dev)
7552{
7553 char *addr = (char *)dev - dev->padded;
7554
WANG Cong4cb28972014-06-02 15:55:22 -07007555 kvfree(addr);
Eric Dumazet74d332c2013-10-30 13:10:44 -07007556}
7557
Linus Torvalds1da177e2005-04-16 15:20:36 -07007558/**
Tom Herbert36909ea2011-01-09 19:36:31 +00007559 * alloc_netdev_mqs - allocate network device
Tom Gundersenc835a672014-07-14 16:37:24 +02007560 * @sizeof_priv: size of private data to allocate space for
7561 * @name: device name format string
7562 * @name_assign_type: origin of device name
7563 * @setup: callback to initialize device
7564 * @txqs: the number of TX subqueues to allocate
7565 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07007566 *
7567 * Allocates a struct net_device with private data area for driver use
Li Zhong90e51ad2013-11-22 15:04:46 +08007568 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00007569 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007570 */
Tom Herbert36909ea2011-01-09 19:36:31 +00007571struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02007572 unsigned char name_assign_type,
Tom Herbert36909ea2011-01-09 19:36:31 +00007573 void (*setup)(struct net_device *),
7574 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007575{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007576 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07007577 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007578 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007579
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07007580 BUG_ON(strlen(name) >= sizeof(dev->name));
7581
Tom Herbert36909ea2011-01-09 19:36:31 +00007582 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007583 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00007584 return NULL;
7585 }
7586
Michael Daltona953be52014-01-16 22:23:28 -08007587#ifdef CONFIG_SYSFS
Tom Herbert36909ea2011-01-09 19:36:31 +00007588 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007589 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00007590 return NULL;
7591 }
7592#endif
7593
David S. Millerfd2ea0a2008-07-17 01:56:23 -07007594 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07007595 if (sizeof_priv) {
7596 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007597 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07007598 alloc_size += sizeof_priv;
7599 }
7600 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007601 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007602
Eric Dumazet74d332c2013-10-30 13:10:44 -07007603 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
7604 if (!p)
7605 p = vzalloc(alloc_size);
Joe Perches62b59422013-02-04 16:48:16 +00007606 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007607 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007608
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007609 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007610 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007611
Eric Dumazet29b44332010-10-11 10:22:12 +00007612 dev->pcpu_refcnt = alloc_percpu(int);
7613 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07007614 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007615
Linus Torvalds1da177e2005-04-16 15:20:36 -07007616 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00007617 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007618
Jiri Pirko22bedad32010-04-01 21:22:57 +00007619 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007620 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00007621
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09007622 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007623
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07007624 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00007625 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007626
Herbert Xud565b0a2008-12-15 23:38:52 -08007627 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00007628 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07007629 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00007630 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007631 INIT_LIST_HEAD(&dev->adj_list.upper);
7632 INIT_LIST_HEAD(&dev->adj_list.lower);
Salam Noureddine7866a622015-01-27 11:35:48 -08007633 INIT_LIST_HEAD(&dev->ptype_all);
7634 INIT_LIST_HEAD(&dev->ptype_specific);
Jiri Kosina59cc1f62016-08-10 11:05:15 +02007635#ifdef CONFIG_NET_SCHED
7636 hash_init(dev->qdisc_hash);
7637#endif
Eric Dumazet02875872014-10-05 18:38:35 -07007638 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007639 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007640
Phil Suttera8131042016-02-17 15:37:43 +01007641 if (!dev->tx_queue_len) {
Phil Sutterf84bb1e2015-08-27 21:21:36 +02007642 dev->priv_flags |= IFF_NO_QUEUE;
Jesper Dangaard Brouer11597082016-11-03 14:56:06 +01007643 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
Phil Suttera8131042016-02-17 15:37:43 +01007644 }
Phil Sutter906470c2015-08-18 10:30:48 +02007645
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007646 dev->num_tx_queues = txqs;
7647 dev->real_num_tx_queues = txqs;
7648 if (netif_alloc_netdev_queues(dev))
7649 goto free_all;
7650
Michael Daltona953be52014-01-16 22:23:28 -08007651#ifdef CONFIG_SYSFS
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007652 dev->num_rx_queues = rxqs;
7653 dev->real_num_rx_queues = rxqs;
7654 if (netif_alloc_rx_queues(dev))
7655 goto free_all;
7656#endif
7657
Linus Torvalds1da177e2005-04-16 15:20:36 -07007658 strcpy(dev->name, name);
Tom Gundersenc835a672014-07-14 16:37:24 +02007659 dev->name_assign_type = name_assign_type;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00007660 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00007661 if (!dev->ethtool_ops)
7662 dev->ethtool_ops = &default_ethtool_ops;
Pablo Neirae687ad62015-05-13 18:19:38 +02007663
7664 nf_hook_ingress_init(dev);
7665
Linus Torvalds1da177e2005-04-16 15:20:36 -07007666 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007667
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007668free_all:
7669 free_netdev(dev);
7670 return NULL;
7671
Eric Dumazet29b44332010-10-11 10:22:12 +00007672free_pcpu:
7673 free_percpu(dev->pcpu_refcnt);
Eric Dumazet74d332c2013-10-30 13:10:44 -07007674free_dev:
7675 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007676 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007677}
Tom Herbert36909ea2011-01-09 19:36:31 +00007678EXPORT_SYMBOL(alloc_netdev_mqs);
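
/* Editor's illustrative sketch, not part of the original file: typical
 * driver-side use of alloc_netdev_mqs(). example_* identifiers are
 * assumptions. The private area returned by netdev_priv() follows the
 * aligned struct net_device and is already zeroed by the allocator.
 */
struct example_priv {
	unsigned int flags;
};

static struct net_device *example_alloc(unsigned int nqueues)
{
	return alloc_netdev_mqs(sizeof(struct example_priv), "ex%d",
				NET_NAME_ENUM, ether_setup,
				nqueues, nqueues);
}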
Linus Torvalds1da177e2005-04-16 15:20:36 -07007679
7680/**
7681 * free_netdev - free network device
7682 * @dev: device
7683 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007684 * This function does the last stage of destroying an allocated device
7685 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007686 * If this is the last reference then it will be freed.
Eric Dumazet93d05d42015-11-18 06:31:03 -08007687 * Must be called in process context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007688 */
7689void free_netdev(struct net_device *dev)
7690{
Herbert Xud565b0a2008-12-15 23:38:52 -08007691 struct napi_struct *p, *n;
7692
Eric Dumazet93d05d42015-11-18 06:31:03 -08007693 might_sleep();
Eric Dumazet60877a32013-06-20 01:15:51 -07007694 netif_free_tx_queues(dev);
Michael Daltona953be52014-01-16 22:23:28 -08007695#ifdef CONFIG_SYSFS
Pankaj Gupta10595902015-01-12 11:41:28 +05307696 kvfree(dev->_rx);
Tom Herbertfe822242010-11-09 10:47:38 +00007697#endif
David S. Millere8a04642008-07-17 00:34:19 -07007698
Eric Dumazet33d480c2011-08-11 19:30:52 +00007699 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00007700
Jiri Pirkof001fde2009-05-05 02:48:28 +00007701 /* Flush device addresses */
7702 dev_addr_flush(dev);
7703
Herbert Xud565b0a2008-12-15 23:38:52 -08007704 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
7705 netif_napi_del(p);
7706
Eric Dumazet29b44332010-10-11 10:22:12 +00007707 free_percpu(dev->pcpu_refcnt);
7708 dev->pcpu_refcnt = NULL;
7709
Stephen Hemminger3041a062006-05-26 13:25:24 -07007710 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007711 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07007712 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007713 return;
7714 }
7715
7716 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
7717 dev->reg_state = NETREG_RELEASED;
7718
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07007719 /* will free via device release */
7720 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007721}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007722EXPORT_SYMBOL(free_netdev);
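
/* Editor's illustrative sketch, not part of the original file: the
 * usual pairing on a probe error path. A device that was never
 * registered (reg_state == NETREG_UNINITIALIZED) is released directly
 * by free_netdev(); example_probe() is an assumption.
 */
static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(0, "ex%d", NET_NAME_ENUM, ether_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);	/* safe: never registered */
		return err;
	}
	return 0;
}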
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007723
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007724/**
7725 * synchronize_net - Synchronize with packet receive processing
7726 *
7727 * Wait for packets currently being received to be done.
7728 * Does not block later packets from starting.
7729 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007730void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007731{
7732 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00007733 if (rtnl_is_locked())
7734 synchronize_rcu_expedited();
7735 else
7736 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007737}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007738EXPORT_SYMBOL(synchronize_net);
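
/* Editor's illustrative sketch, not part of the original file: the
 * unpublish-then-wait pattern synchronize_net() supports, modelled on
 * how rx handlers are torn down. example_* identifiers are assumptions
 * and the caller is assumed to hold rtnl.
 */
static void example_detach_rx_handler(struct net_device *dev, void *priv)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	synchronize_net();	/* no CPU is still inside the old handler */
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
	kfree(priv);		/* now safe to free the handler's state */
}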
Linus Torvalds1da177e2005-04-16 15:20:36 -07007739
7740/**
Eric Dumazet44a08732009-10-27 07:03:04 +00007741 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07007742 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00007743 * @head: list to queue the device on, or NULL to unregister it now
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08007744 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007745 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08007746 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00007747 * If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007748 *
7749 * Callers must hold the rtnl semaphore. You may want
7750 * unregister_netdev() instead of this.
7751 */
7752
Eric Dumazet44a08732009-10-27 07:03:04 +00007753void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007754{
Herbert Xua6620712007-12-12 19:21:56 -08007755 ASSERT_RTNL();
7756
Eric Dumazet44a08732009-10-27 07:03:04 +00007757 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00007758 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00007759 } else {
7760 rollback_registered(dev);
7761 /* Finish processing unregister after unlock */
7762 net_set_todo(dev);
7763 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007764}
Eric Dumazet44a08732009-10-27 07:03:04 +00007765EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007766
7767/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007768 * unregister_netdevice_many - unregister many devices
7769 * @head: list of devices
Eric Dumazet87757a92014-06-06 06:44:03 -07007770 *
7771 * Note: As most callers use a stack allocated list_head,
7772 * we force a list_del() to make sure the stack won't be corrupted later.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007773 */
7774void unregister_netdevice_many(struct list_head *head)
7775{
7776 struct net_device *dev;
7777
7778 if (!list_empty(head)) {
7779 rollback_registered_many(head);
7780 list_for_each_entry(dev, head, unreg_list)
7781 net_set_todo(dev);
Eric Dumazet87757a92014-06-06 06:44:03 -07007782 list_del(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007783 }
7784}
Eric Dumazet63c80992009-10-27 07:06:49 +00007785EXPORT_SYMBOL(unregister_netdevice_many);
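
/* Editor's illustrative sketch, not part of the original file: batching
 * unregistrations so the expensive synchronization in
 * rollback_registered_many() is paid once for the whole group.
 * example_link_ops is an assumption, standing in for a driver's
 * rtnl_link_ops.
 */
static struct rtnl_link_ops example_link_ops;

static void example_destroy_all(struct net *net)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(kill_list);

	ASSERT_RTNL();
	for_each_netdev_safe(net, dev, tmp)
		if (dev->rtnl_link_ops == &example_link_ops)
			unregister_netdevice_queue(dev, &kill_list);
	unregister_netdevice_many(&kill_list);
}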
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007786
7787/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007788 * unregister_netdev - remove device from the kernel
7789 * @dev: device
7790 *
7791 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08007792 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007793 *
7794 * This is just a wrapper for unregister_netdevice that takes
7795 * the rtnl semaphore. In general you want to use this and not
7796 * unregister_netdevice.
7797 */
7798void unregister_netdev(struct net_device *dev)
7799{
7800 rtnl_lock();
7801 unregister_netdevice(dev);
7802 rtnl_unlock();
7803}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007804EXPORT_SYMBOL(unregister_netdev);
7805
Eric W. Biedermance286d32007-09-12 13:53:49 +02007806/**
7807 * dev_change_net_namespace - move device to a different network namespace
7808 * @dev: device
7809 * @net: network namespace
7810 * @pat: If not NULL name pattern to try if the current device name
7811 * is already taken in the destination network namespace.
7812 *
7813 * This function shuts down a device interface and moves it
7814 * to a new network namespace. On success 0 is returned, on
7815 * a failure a negative errno code is returned.
7816 *
7817 * Callers must hold the rtnl semaphore.
7818 */
7819
7820int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7821{
Eric W. Biedermance286d32007-09-12 13:53:49 +02007822 int err;
7823
7824 ASSERT_RTNL();
7825
7826 /* Don't allow namespace local devices to be moved. */
7827 err = -EINVAL;
7828 if (dev->features & NETIF_F_NETNS_LOCAL)
7829 goto out;
7830
7831 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02007832 if (dev->reg_state != NETREG_REGISTERED)
7833 goto out;
7834
7835 /* Get out if there is nothing to do */
7836 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09007837 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02007838 goto out;
7839
7840 /* Pick the destination device name, and ensure
7841 * we can use it in the destination network namespace.
7842 */
7843 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00007844 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007845 /* We get here if we can't use the current device name */
7846 if (!pat)
7847 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00007848 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007849 goto out;
7850 }
7851
7852 /*
7853 * And now a mini version of register_netdevice and unregister_netdevice.
7854 */
7855
7856 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07007857 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007858
7859 /* And unlink it from device chain */
7860 err = -ENODEV;
7861 unlist_netdevice(dev);
7862
7863 synchronize_net();
7864
7865 /* Shutdown queueing discipline. */
7866 dev_shutdown(dev);
7867
7868 /* Notify protocols that we are about to destroy
7869    this device. They should clean up all of their state.
David Lamparter3b27e102010-09-17 03:22:19 +00007870
7871 Note that dev->reg_state stays at NETREG_REGISTERED.
7872    This is intentional: this way 8021q and macvlan know
7873 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02007874 */
7875 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00007876 rcu_barrier();
7877 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007878 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007879
7880 /*
7881 * Flush the unicast and multicast chains
7882 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007883 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00007884 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007885
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007886 /* Send a netdev-removed uevent to the old namespace */
7887 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007888 netdev_adjacent_del_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007889
Eric W. Biedermance286d32007-09-12 13:53:49 +02007890 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09007891 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007892
Eric W. Biedermance286d32007-09-12 13:53:49 +02007893 /* If there is an ifindex conflict assign a new one */
Nicolas Dichtel7a66bbc2015-04-02 17:07:09 +02007894 if (__dev_get_by_index(net, dev->ifindex))
Eric W. Biedermance286d32007-09-12 13:53:49 +02007895 dev->ifindex = dev_new_index(net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007896
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007897 /* Send a netdev-add uevent to the new namespace */
7898 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007899 netdev_adjacent_add_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007900
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007901 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07007902 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007903 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007904
7905 /* Add the device back in the hashes */
7906 list_netdevice(dev);
7907
7908 /* Notify protocols that a new device appeared. */
7909 call_netdevice_notifiers(NETDEV_REGISTER, dev);
7910
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007911 /*
7912 * Prevent userspace races by waiting until the network
7913 * device is fully setup before sending notifications.
7914 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007915 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007916
Eric W. Biedermance286d32007-09-12 13:53:49 +02007917 synchronize_net();
7918 err = 0;
7919out:
7920 return err;
7921}
Johannes Berg463d0182009-07-14 00:33:35 +02007922EXPORT_SYMBOL_GPL(dev_change_net_namespace);
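
/* Editor's illustrative sketch, not part of the original file: moving a
 * device into another namespace with "eth%d" as the fallback name
 * pattern, taking rtnl as the function requires. example_move_dev() is
 * an assumption.
 */
static int example_move_dev(struct net_device *dev, struct net *target)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target, "eth%d");
	rtnl_unlock();
	return err;
}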
Eric W. Biedermance286d32007-09-12 13:53:49 +02007923
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01007924static int dev_cpu_dead(unsigned int oldcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007925{
7926 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007927 struct sk_buff *skb;
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01007928 unsigned int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007929 struct softnet_data *sd, *oldsd;
7930
Linus Torvalds1da177e2005-04-16 15:20:36 -07007931 local_irq_disable();
7932 cpu = smp_processor_id();
7933 sd = &per_cpu(softnet_data, cpu);
7934 oldsd = &per_cpu(softnet_data, oldcpu);
7935
7936 /* Find end of our completion_queue. */
7937 list_skb = &sd->completion_queue;
7938 while (*list_skb)
7939 list_skb = &(*list_skb)->next;
7940 /* Append completion queue from offline CPU. */
7941 *list_skb = oldsd->completion_queue;
7942 oldsd->completion_queue = NULL;
7943
Linus Torvalds1da177e2005-04-16 15:20:36 -07007944 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00007945 if (oldsd->output_queue) {
7946 *sd->output_queue_tailp = oldsd->output_queue;
7947 sd->output_queue_tailp = oldsd->output_queue_tailp;
7948 oldsd->output_queue = NULL;
7949 oldsd->output_queue_tailp = &oldsd->output_queue;
7950 }
Eric Dumazetac64da02015-01-15 17:04:22 -08007951 /* Append NAPI poll list from offline CPU, with one exception:
7952 * process_backlog() must be called by cpu owning percpu backlog.
7953 * We properly handle process_queue & input_pkt_queue later.
7954 */
7955 while (!list_empty(&oldsd->poll_list)) {
7956 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7957 struct napi_struct,
7958 poll_list);
7959
7960 list_del_init(&napi->poll_list);
7961 if (napi->poll == process_backlog)
7962 napi->state = 0;
7963 else
7964 ____napi_schedule(sd, napi);
Heiko Carstens264524d2011-06-06 20:50:03 +00007965 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007966
7967 raise_softirq_irqoff(NET_TX_SOFTIRQ);
7968 local_irq_enable();
7969
7970 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00007971 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08007972 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00007973 input_queue_head_incr(oldsd);
7974 }
Eric Dumazetac64da02015-01-15 17:04:22 -08007975 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08007976 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00007977 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07007978 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007979
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01007980 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007981}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007982
Herbert Xu7f353bf2007-08-10 15:47:58 -07007983/**
Herbert Xub63365a2008-10-23 01:11:29 -07007984 * netdev_increment_features - increment feature set by one device
7985 * @all: current feature set
7986 * @one: new feature set
7987 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07007988 *
7989 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07007990 * @one to the master device with current feature set @all. Will not
7991 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07007992 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007993netdev_features_t netdev_increment_features(netdev_features_t all,
7994 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07007995{
Tom Herbertc8cd0982015-12-14 11:19:44 -08007996 if (mask & NETIF_F_HW_CSUM)
Tom Herberta1882222015-12-14 11:19:43 -08007997 mask |= NETIF_F_CSUM_MASK;
Michał Mirosław1742f182011-04-22 06:31:16 +00007998 mask |= NETIF_F_VLAN_CHALLENGED;
7999
Tom Herberta1882222015-12-14 11:19:43 -08008000 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
Michał Mirosław1742f182011-04-22 06:31:16 +00008001 all &= one | ~NETIF_F_ALL_FOR_ALL;
8002
Michał Mirosław1742f182011-04-22 06:31:16 +00008003 /* If one device supports hw checksumming, set for all. */
Tom Herbertc8cd0982015-12-14 11:19:44 -08008004 if (all & NETIF_F_HW_CSUM)
8005 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07008006
8007 return all;
8008}
Herbert Xub63365a2008-10-23 01:11:29 -07008009EXPORT_SYMBOL(netdev_increment_features);
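
/* Editor's illustrative sketch, not part of the original file: a master
 * driver folding each lower device's features into its own with
 * netdev_increment_features(), bonding-style. EXAMPLE_BASE_FEATURES and
 * example_* identifiers are assumptions.
 */
#define EXAMPLE_BASE_FEATURES (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA)

static netdev_features_t example_compute_features(struct net_device *master)
{
	netdev_features_t features = EXAMPLE_BASE_FEATURES;
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(master, lower, iter)
		features = netdev_increment_features(features,
						     lower->features,
						     EXAMPLE_BASE_FEATURES);
	return features;
}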
Herbert Xu7f353bf2007-08-10 15:47:58 -07008010
Baruch Siach430f03c2013-06-02 20:43:55 +00008011static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008012{
8013 int i;
8014 struct hlist_head *hash;
8015
8016 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
8017 if (hash != NULL)
8018 for (i = 0; i < NETDEV_HASHENTRIES; i++)
8019 INIT_HLIST_HEAD(&hash[i]);
8020
8021 return hash;
8022}
8023
Eric W. Biederman881d9662007-09-17 11:56:21 -07008024/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07008025static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07008026{
Rustad, Mark D734b6542012-07-18 09:06:07 +00008027 if (net != &init_net)
8028 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07008029
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008030 net->dev_name_head = netdev_create_hash();
8031 if (net->dev_name_head == NULL)
8032 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008033
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008034 net->dev_index_head = netdev_create_hash();
8035 if (net->dev_index_head == NULL)
8036 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008037
8038 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008039
8040err_idx:
8041 kfree(net->dev_name_head);
8042err_name:
8043 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008044}
8045
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008046/**
8047 * netdev_drivername - network driver for the device
8048 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008049 *
8050 * Determine network driver for device.
8051 */
David S. Miller3019de12011-06-06 16:41:33 -07008052const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07008053{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07008054 const struct device_driver *driver;
8055 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07008056 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07008057
8058 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008059 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07008060 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008061
8062 driver = parent->driver;
8063 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07008064 return driver->name;
8065 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008066}
8067
Joe Perches6ea754e2014-09-22 11:10:50 -07008068static void __netdev_printk(const char *level, const struct net_device *dev,
8069 struct va_format *vaf)
Joe Perches256df2f2010-06-27 01:02:35 +00008070{
Joe Perchesb004ff42012-09-12 20:12:19 -07008071 if (dev && dev->dev.parent) {
Joe Perches6ea754e2014-09-22 11:10:50 -07008072 dev_printk_emit(level[1] - '0',
8073 dev->dev.parent,
8074 "%s %s %s%s: %pV",
8075 dev_driver_string(dev->dev.parent),
8076 dev_name(dev->dev.parent),
8077 netdev_name(dev), netdev_reg_state(dev),
8078 vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008079 } else if (dev) {
Joe Perches6ea754e2014-09-22 11:10:50 -07008080 printk("%s%s%s: %pV",
8081 level, netdev_name(dev), netdev_reg_state(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008082 } else {
Joe Perches6ea754e2014-09-22 11:10:50 -07008083 printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008084 }
Joe Perches256df2f2010-06-27 01:02:35 +00008085}
8086
Joe Perches6ea754e2014-09-22 11:10:50 -07008087void netdev_printk(const char *level, const struct net_device *dev,
8088 const char *format, ...)
Joe Perches256df2f2010-06-27 01:02:35 +00008089{
8090 struct va_format vaf;
8091 va_list args;
Joe Perches256df2f2010-06-27 01:02:35 +00008092
8093 va_start(args, format);
8094
8095 vaf.fmt = format;
8096 vaf.va = &args;
8097
Joe Perches6ea754e2014-09-22 11:10:50 -07008098 __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008099
Joe Perches256df2f2010-06-27 01:02:35 +00008100 va_end(args);
Joe Perches256df2f2010-06-27 01:02:35 +00008101}
8102EXPORT_SYMBOL(netdev_printk);
8103
8104#define define_netdev_printk_level(func, level) \
Joe Perches6ea754e2014-09-22 11:10:50 -07008105void func(const struct net_device *dev, const char *fmt, ...) \
Joe Perches256df2f2010-06-27 01:02:35 +00008106{ \
Joe Perches256df2f2010-06-27 01:02:35 +00008107 struct va_format vaf; \
8108 va_list args; \
8109 \
8110 va_start(args, fmt); \
8111 \
8112 vaf.fmt = fmt; \
8113 vaf.va = &args; \
8114 \
Joe Perches6ea754e2014-09-22 11:10:50 -07008115 __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07008116 \
Joe Perches256df2f2010-06-27 01:02:35 +00008117 va_end(args); \
Joe Perches256df2f2010-06-27 01:02:35 +00008118} \
8119EXPORT_SYMBOL(func);
8120
8121define_netdev_printk_level(netdev_emerg, KERN_EMERG);
8122define_netdev_printk_level(netdev_alert, KERN_ALERT);
8123define_netdev_printk_level(netdev_crit, KERN_CRIT);
8124define_netdev_printk_level(netdev_err, KERN_ERR);
8125define_netdev_printk_level(netdev_warn, KERN_WARNING);
8126define_netdev_printk_level(netdev_notice, KERN_NOTICE);
8127define_netdev_printk_level(netdev_info, KERN_INFO);
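
/* Editor's illustrative sketch, not part of the original file: typical
 * driver-side use of the levelled helpers generated above; the message
 * strings are assumptions.
 */
static void example_report_link(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link up\n");
	else
		netdev_warn(dev, "link down\n");
}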
8128
Pavel Emelyanov46650792007-10-08 20:38:39 -07008129static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07008130{
8131 kfree(net->dev_name_head);
8132 kfree(net->dev_index_head);
8133}
8134
Denis V. Lunev022cbae2007-11-13 03:23:50 -08008135static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07008136 .init = netdev_init,
8137 .exit = netdev_exit,
8138};
8139
Pavel Emelyanov46650792007-10-08 20:38:39 -07008140static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02008141{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008142 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02008143 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008144 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02008145 * initial network namespace
8146 */
8147 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008148 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008149 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008150 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02008151
8152 /* Ignore unmovable devices (e.g. loopback) */
8153 if (dev->features & NETIF_F_NETNS_LOCAL)
8154 continue;
8155
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008156 /* Leave virtual devices for the generic cleanup */
8157 if (dev->rtnl_link_ops)
8158 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08008159
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008160 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008161 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
8162 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008163 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008164 pr_emerg("%s: failed to move %s to init_net: %d\n",
8165 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008166 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02008167 }
8168 }
8169 rtnl_unlock();
8170}
8171
Eric W. Biederman50624c92013-09-23 21:19:49 -07008172static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
8173{
8174 /* Return with the rtnl_lock held when there are no network
8175 * devices unregistering in any network namespace in net_list.
8176 */
8177 struct net *net;
8178 bool unregistering;
Peter Zijlstraff960a72014-10-29 17:04:56 +01008179 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008180
Peter Zijlstraff960a72014-10-29 17:04:56 +01008181 add_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008182 for (;;) {
Eric W. Biederman50624c92013-09-23 21:19:49 -07008183 unregistering = false;
8184 rtnl_lock();
8185 list_for_each_entry(net, net_list, exit_list) {
8186 if (net->dev_unreg_count > 0) {
8187 unregistering = true;
8188 break;
8189 }
8190 }
8191 if (!unregistering)
8192 break;
8193 __rtnl_unlock();
Peter Zijlstraff960a72014-10-29 17:04:56 +01008194
8195 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008196 }
Peter Zijlstraff960a72014-10-29 17:04:56 +01008197 remove_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008198}
8199
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008200static void __net_exit default_device_exit_batch(struct list_head *net_list)
8201{
8202 /* At exit, all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04008203 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008204 * Do this across as many network namespaces as possible to
8205 * improve batching efficiency.
8206 */
8207 struct net_device *dev;
8208 struct net *net;
8209 LIST_HEAD(dev_kill_list);
8210
Eric W. Biederman50624c92013-09-23 21:19:49 -07008211 /* To prevent network device cleanup code from dereferencing
8212 * loopback devices or network devices that have been freed,
8213 * wait here for all pending unregistrations to complete
8214 * before unregistering the loopback device and allowing the
8215 * network namespace to be freed.
8216 *
8217 * The netdev todo list containing all network device
8218 * unregistrations that happen in default_device_exit_batch
8219 * will run in the rtnl_unlock() at the end of
8220 * default_device_exit_batch.
8221 */
8222 rtnl_lock_unregistering(net_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008223 list_for_each_entry(net, net_list, exit_list) {
8224 for_each_netdev_reverse(net, dev) {
Jiri Pirkob0ab2fa2014-06-26 09:58:25 +02008225 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008226 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
8227 else
8228 unregister_netdevice_queue(dev, &dev_kill_list);
8229 }
8230 }
8231 unregister_netdevice_many(&dev_kill_list);
8232 rtnl_unlock();
8233}
8234
Denis V. Lunev022cbae2007-11-13 03:23:50 -08008235static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008236 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008237 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02008238};
8239
Linus Torvalds1da177e2005-04-16 15:20:36 -07008240/*
8241 * Initialize the DEV module. At boot time this walks the device list and
8242 * unhooks any devices that fail to initialise (normally hardware not
8243 * present) and leaves us with a valid list of present and active devices.
8244 *
8245 */
8246
8247/*
8248 * This is called single threaded during boot, so no need
8249 * to take the rtnl semaphore.
8250 */
8251static int __init net_dev_init(void)
8252{
8253 int i, rc = -ENOMEM;
8254
8255 BUG_ON(!dev_boot_phase);
8256
Linus Torvalds1da177e2005-04-16 15:20:36 -07008257 if (dev_proc_init())
8258 goto out;
8259
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008260 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07008261 goto out;
8262
8263 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08008264 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008265 INIT_LIST_HEAD(&ptype_base[i]);
8266
Vlad Yasevich62532da2012-11-15 08:49:10 +00008267 INIT_LIST_HEAD(&offload_base);
8268
Eric W. Biederman881d9662007-09-17 11:56:21 -07008269 if (register_pernet_subsys(&netdev_net_ops))
8270 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008271
8272 /*
8273 * Initialise the packet receive queues.
8274 */
8275
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07008276 for_each_possible_cpu(i) {
Eric Dumazet41852492016-08-26 12:50:39 -07008277 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008278 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008279
Eric Dumazet41852492016-08-26 12:50:39 -07008280 INIT_WORK(flush, flush_backlog);
8281
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008282 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07008283 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008284 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00008285 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00008286#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008287 sd->csd.func = rps_trigger_softirq;
8288 sd->csd.info = sd;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008289 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07008290#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00008291
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008292 sd->backlog.poll = process_backlog;
8293 sd->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008294 }
8295
Linus Torvalds1da177e2005-04-16 15:20:36 -07008296 dev_boot_phase = 0;
8297
Eric W. Biederman505d4f72008-11-07 22:54:20 -08008298 /* The loopback device is special: if any other network device
8299 * is present in a network namespace, the loopback device must
8300 * be present as well. Since we now dynamically allocate and free
8301 * the loopback device, ensure this invariant is maintained by
8302 * keeping the loopback device the first device on the list of
8303 * network devices. This ensures the loopback device is the
8304 * first device that appears and the last network device that
8305 * disappears.
8306 */
8307 if (register_pernet_device(&loopback_net_ops))
8308 goto out;
8309
8310 if (register_pernet_device(&default_device_ops))
8311 goto out;
8312
Carlos R. Mafra962cf362008-05-15 11:15:37 -03008313 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
8314 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008315
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008316 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
8317 NULL, dev_cpu_dead);
8318 WARN_ON(rc < 0);
Thomas Graff38a9eb2015-07-21 10:43:56 +02008319 dst_subsys_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008320 rc = 0;
8321out:
8322 return rc;
8323}
8324
8325subsys_initcall(net_dev_init);