/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/sctp.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
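
/* Illustrative sketch (an editor's addition, not part of the original
 * file): a pure reader following the rules above can walk the device
 * list either under dev_base_lock or inside an RCU read-side section.
 * do_something_readonly() is a hypothetical helper:
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		do_something_readonly(dev);
 *	read_unlock(&dev_base_lock);
 */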

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and the walk over protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a packet-mangling protocol handler were
 *	first on the list, it could not sense that the packet is cloned
 *	and should be copied-on-write, so it would change the packet and
 *	subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
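
/* Illustrative sketch (an editor's addition, not part of the original
 * file): a protocol module pairs dev_add_pack() with dev_remove_pack()
 * over its lifetime.  The "example_*" names are hypothetical:
 *
 *	static struct packet_type example_packet_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = example_rcv,	// the module's receive handler
 *	};
 *
 *	dev_add_pack(&example_packet_type);	// e.g. at module init
 *	...
 *	dev_remove_pack(&example_packet_type);	// e.g. at module exit
 */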


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
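
/* Illustrative sketch (an editor's addition, not part of the original
 * file): GRO/GSO callbacks are registered the same way; lower priority
 * values are linked earlier in offload_base.  The "example_*" callbacks
 * are hypothetical:
 *
 *	static struct packet_offload example_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.priority = 0,
 *		.callbacks = {
 *			.gso_segment  = example_gso_segment,
 *			.gro_receive  = example_gro_receive,
 *			.gro_complete = example_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&example_offload);
 */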

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves the settings configured at boot time for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
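
/* Usage note (an editor's addition, not part of the original file):
 * this parser backs the documented "netdev=" boot option, whose format
 * is netdev=<irq>,<io>,<mem_start>,<mem_end>,<name>, for example:
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * get_options() consumes the leading numbers into ints[] (ints[0] holds
 * the count) and returns the rest of the string, here "eth0", which
 * becomes the entry name passed to netdev_boot_setup_add().
 */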

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. This API allows the
 *	caller to get that information.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU read lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
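
/* Illustrative sketch (an editor's addition, not part of the original
 * file): a lockless lookup; the returned pointer is only valid while
 * the RCU read-side section is held:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		ifindex = dev->ifindex;
 *	rcu_read_unlock();
 */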

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
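
/* Illustrative sketch (an editor's addition, not part of the original
 * file): the refcounted variant may be used from any context, but the
 * reference must be released with dev_put():
 *
 *	dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		...			// dev cannot go away here
 *		dev_put(dev);
 *	}
 */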

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU read lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put() to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
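
/* Worked examples (an editor's addition, not part of the original
 * file): "" is rejected (empty), as are names of IFNAMSIZ (16) bytes
 * or more, ".", "..", and anything containing '/', ':' or whitespace;
 * "eth0" or "br-lan" pass.
 */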

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
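
/* Illustrative sketch (an editor's addition, not part of the original
 * file): a driver wanting an automatically numbered name would call,
 * under rtnl_lock():
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	// on success, err is the unit chosen and dev->name is e.g. "dummy0"
 */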

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
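
/* Illustrative sketch (an editor's addition, not part of the original
 * file): renaming is only permitted while the device is down, with the
 * rtnl semaphore held:
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "wan%d");	// or a literal name
 *	rtnl_unlock();
 */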

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}
1255
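/* Illustrative sketch (not part of the original file): setting an interface
 * alias from management code. dev_set_alias() asserts RTNL, so the caller
 * must hold it; the "example_*" name and alias text are assumptions made
 * purely for this example.
 */
static int example_set_alias(struct net_device *dev)
{
	static const char alias[] = "uplink-to-core-switch";
	int ret;

	rtnl_lock();
	ret = dev_set_alias(dev, alias, strlen(alias));
	rtnl_unlock();

	/* dev_set_alias() returns the requested length on success */
	return ret < 0 ? ret : 0;
}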
1256
1257/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001258 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001259 * @dev: device to cause notification
1260 *
1261 * Called to indicate a device has changed features.
1262 */
1263void netdev_features_change(struct net_device *dev)
1264{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001265 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001266}
1267EXPORT_SYMBOL(netdev_features_change);
1268
1269/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 * netdev_state_change - device changes state
1271 * @dev: device to cause notification
1272 *
1273 * Called to indicate a device has changed state. This function calls
1274 * the notifier chains for netdev_chain and sends a NEWLINK message
1275 * to the routing socket.
1276 */
1277void netdev_state_change(struct net_device *dev)
1278{
1279 if (dev->flags & IFF_UP) {
Loic Prylli54951192014-07-01 21:39:43 -07001280 struct netdev_notifier_change_info change_info;
1281
1282 change_info.flags_changed = 0;
1283 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1284 &change_info.info);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001285 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 }
1287}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001288EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289
Amerigo Wangee89bab2012-08-09 22:14:56 +00001290/**
1291 * netdev_notify_peers - notify network peers about existence of @dev
1292 * @dev: network device
1293 *
1294 * Generate traffic such that interested network peers are aware of
1295 * @dev, such as by generating a gratuitous ARP. This may be used when
1296 * a device wants to inform the rest of the network about some sort of
1297 * reconfiguration such as a failover event or virtual machine
1298 * migration.
1299 */
1300void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001301{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001302 rtnl_lock();
1303 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1304 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001305}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001306EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001307
Patrick McHardybd380812010-02-26 06:34:53 +00001308static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001310 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001311 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001313 ASSERT_RTNL();
1314
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 if (!netif_device_present(dev))
1316 return -ENODEV;
1317
Neil Hormanca99ca12013-02-05 08:05:43 +00001318 /* Block netpoll from trying to do any rx path servicing.
1319	 * If we don't do this, there is a chance that ndo_poll_controller
1320	 * or ndo_poll may be running while we open the device.
1321 */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001322 netpoll_poll_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001323
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001324 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1325 ret = notifier_to_errno(ret);
1326 if (ret)
1327 return ret;
1328
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001330
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001331 if (ops->ndo_validate_addr)
1332 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001333
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001334 if (!ret && ops->ndo_open)
1335 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336
Eric W. Biederman66b55522014-03-27 15:39:03 -07001337 netpoll_poll_enable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001338
Jeff Garzikbada3392007-10-23 20:19:37 -07001339 if (ret)
1340 clear_bit(__LINK_STATE_START, &dev->state);
1341 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 dev->flags |= IFF_UP;
Patrick McHardy4417da62007-06-27 01:28:10 -07001343 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001345 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001347
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 return ret;
1349}
Patrick McHardybd380812010-02-26 06:34:53 +00001350
1351/**
1352 * dev_open - prepare an interface for use.
1353 * @dev: device to open
1354 *
1355 * Takes a device from down to up state. The device's private open
1356 * function is invoked and then the multicast lists are loaded. Finally
1357 * the device is moved into the up state and a %NETDEV_UP message is
1358 * sent to the netdev notifier chain.
1359 *
1360 * Calling this function on an active interface is a nop. On a failure
1361 * a negative errno code is returned.
1362 */
1363int dev_open(struct net_device *dev)
1364{
1365 int ret;
1366
Patrick McHardybd380812010-02-26 06:34:53 +00001367 if (dev->flags & IFF_UP)
1368 return 0;
1369
Patrick McHardybd380812010-02-26 06:34:53 +00001370 ret = __dev_open(dev);
1371 if (ret < 0)
1372 return ret;
1373
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001374 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Patrick McHardybd380812010-02-26 06:34:53 +00001375 call_netdevice_notifiers(NETDEV_UP, dev);
1376
1377 return ret;
1378}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001379EXPORT_SYMBOL(dev_open);
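
/* Illustrative sketch (hypothetical helper, not in this file): bringing an
 * interface administratively up, and down again, from kernel code. Both
 * dev_open() and dev_close() must run under the RTNL lock.
 */
static int example_cycle_device(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);		/* nop if the device is already up */
	rtnl_unlock();
	if (err)
		return err;

	/* ... use the device ... */

	rtnl_lock();
	dev_close(dev);			/* always returns 0 */
	rtnl_unlock();
	return 0;
}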
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380
Octavian Purdila44345722010-12-13 12:44:07 +00001381static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382{
Octavian Purdila44345722010-12-13 12:44:07 +00001383 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001384
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001385 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001386 might_sleep();
1387
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001388 list_for_each_entry(dev, head, close_list) {
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001389 /* Temporarily disable netpoll until the interface is down */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001390 netpoll_poll_disable(dev);
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001391
Octavian Purdila44345722010-12-13 12:44:07 +00001392 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393
Octavian Purdila44345722010-12-13 12:44:07 +00001394 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395
Octavian Purdila44345722010-12-13 12:44:07 +00001396		/* Synchronize to scheduled poll. We cannot touch the poll list; it
1397		 * can even be on a different CPU. So just clear netif_running().
1398		 *
1399		 * dev->stop() will invoke napi_disable() on all of its
1400		 * napi_struct instances on this device.
1401 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001402 smp_mb__after_atomic(); /* Commit netif_running(). */
Octavian Purdila44345722010-12-13 12:44:07 +00001403 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404
Octavian Purdila44345722010-12-13 12:44:07 +00001405 dev_deactivate_many(head);
1406
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001407 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001408 const struct net_device_ops *ops = dev->netdev_ops;
1409
1410 /*
1411		 * Call the device-specific close. This cannot fail and is
1412		 * only done if the device is UP.
1413 *
1414 * We allow it to be called even after a DETACH hot-plug
1415 * event.
1416 */
1417 if (ops->ndo_stop)
1418 ops->ndo_stop(dev);
1419
Octavian Purdila44345722010-12-13 12:44:07 +00001420 dev->flags &= ~IFF_UP;
Eric W. Biederman66b55522014-03-27 15:39:03 -07001421 netpoll_poll_enable(dev);
Octavian Purdila44345722010-12-13 12:44:07 +00001422 }
1423
1424 return 0;
1425}
1426
1427static int __dev_close(struct net_device *dev)
1428{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001429 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001430 LIST_HEAD(single);
1431
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001432 list_add(&dev->close_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001433 retval = __dev_close_many(&single);
1434 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001435
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001436 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001437}
1438
David S. Miller99c4a262015-03-18 22:52:33 -04001439int dev_close_many(struct list_head *head, bool unlink)
Octavian Purdila44345722010-12-13 12:44:07 +00001440{
1441 struct net_device *dev, *tmp;
Octavian Purdila44345722010-12-13 12:44:07 +00001442
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001443 /* Remove the devices that don't need to be closed */
1444 list_for_each_entry_safe(dev, tmp, head, close_list)
Octavian Purdila44345722010-12-13 12:44:07 +00001445 if (!(dev->flags & IFF_UP))
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001446 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001447
1448 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001449
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001450 list_for_each_entry_safe(dev, tmp, head, close_list) {
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001451 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Octavian Purdila44345722010-12-13 12:44:07 +00001452 call_netdevice_notifiers(NETDEV_DOWN, dev);
David S. Miller99c4a262015-03-18 22:52:33 -04001453 if (unlink)
1454 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001455 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 return 0;
1458}
David S. Miller99c4a262015-03-18 22:52:33 -04001459EXPORT_SYMBOL(dev_close_many);
Patrick McHardybd380812010-02-26 06:34:53 +00001460
1461/**
1462 * dev_close - shutdown an interface.
1463 * @dev: device to shutdown
1464 *
1465 * This function moves an active device into down state. A
1466 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1467 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1468 * chain.
1469 */
1470int dev_close(struct net_device *dev)
1471{
Eric Dumazete14a5992011-05-10 12:26:06 -07001472 if (dev->flags & IFF_UP) {
1473 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001474
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001475 list_add(&dev->close_list, &single);
David S. Miller99c4a262015-03-18 22:52:33 -04001476 dev_close_many(&single, true);
Eric Dumazete14a5992011-05-10 12:26:06 -07001477 list_del(&single);
1478 }
dingtianhongda6e3782013-05-27 19:53:31 +00001479 return 0;
Patrick McHardybd380812010-02-26 06:34:53 +00001480}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001481EXPORT_SYMBOL(dev_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482
1483
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001484/**
1485 * dev_disable_lro - disable Large Receive Offload on a device
1486 * @dev: device
1487 *
1488 * Disable Large Receive Offload (LRO) on a net device. Must be
1489 * called under RTNL. This is needed if received packets may be
1490 * forwarded to another interface.
1491 */
1492void dev_disable_lro(struct net_device *dev)
1493{
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001494 struct net_device *lower_dev;
1495 struct list_head *iter;
Michal Kubeček529d0482013-11-15 06:18:50 +01001496
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001497 dev->wanted_features &= ~NETIF_F_LRO;
1498 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001499
Michał Mirosław22d59692011-04-21 12:42:15 +00001500 if (unlikely(dev->features & NETIF_F_LRO))
1501 netdev_WARN(dev, "failed to disable LRO!\n");
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001502
1503 netdev_for_each_lower_dev(dev, lower_dev, iter)
1504 dev_disable_lro(lower_dev);
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001505}
1506EXPORT_SYMBOL(dev_disable_lro);
1507
Jiri Pirko351638e2013-05-28 01:30:21 +00001508static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1509 struct net_device *dev)
1510{
1511 struct netdev_notifier_info info;
1512
1513 netdev_notifier_info_init(&info, dev);
1514 return nb->notifier_call(nb, val, &info);
1515}
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001516
Eric W. Biederman881d9662007-09-17 11:56:21 -07001517static int dev_boot_phase = 1;
1518
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519/**
1520 * register_netdevice_notifier - register a network notifier block
1521 * @nb: notifier
1522 *
1523 * Register a notifier to be called when network device events occur.
1524 * The notifier passed is linked into the kernel structures and must
1525 * not be reused until it has been unregistered. A negative errno code
1526 * is returned on a failure.
1527 *
1528 * When registered, all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001529 * to the new notifier so that it has a race-free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 * view of the network device list.
1531 */
1532
1533int register_netdevice_notifier(struct notifier_block *nb)
1534{
1535 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001536 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001537 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 int err;
1539
1540 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001541 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001542 if (err)
1543 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001544 if (dev_boot_phase)
1545 goto unlock;
1546 for_each_net(net) {
1547 for_each_netdev(net, dev) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001548 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001549 err = notifier_to_errno(err);
1550 if (err)
1551 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552
Eric W. Biederman881d9662007-09-17 11:56:21 -07001553 if (!(dev->flags & IFF_UP))
1554 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001555
Jiri Pirko351638e2013-05-28 01:30:21 +00001556 call_netdevice_notifier(nb, NETDEV_UP, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001557 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001559
1560unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 rtnl_unlock();
1562 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001563
1564rollback:
1565 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001566 for_each_net(net) {
1567 for_each_netdev(net, dev) {
1568 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001569 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001570
Eric W. Biederman881d9662007-09-17 11:56:21 -07001571 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001572 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1573 dev);
1574 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001575 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001576 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001577 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001578 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001579
RongQing.Li8f891482011-11-30 23:43:07 -05001580outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001581 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001582 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001584EXPORT_SYMBOL(register_netdevice_notifier);
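
/* Illustrative sketch (hypothetical module code, names assumed): a minimal
 * netdevice notifier. The callback's void pointer is a
 * struct netdev_notifier_info; netdev_notifier_info_to_dev() recovers
 * the net_device from it.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_info("%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb) replays NETDEV_REGISTER
 * and NETDEV_UP for devices that already exist; unregister_netdevice_notifier()
 * later synthesizes the reverse events, as documented below.
 */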
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585
1586/**
1587 * unregister_netdevice_notifier - unregister a network notifier block
1588 * @nb: notifier
1589 *
1590 * Unregister a notifier previously registered by
1591 * register_netdevice_notifier(). The notifier is unlinked from the
1592 * kernel structures and may then be reused. A negative errno code
1593 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001594 *
1595 * After unregistering unregister and down device events are synthesized
1596 * for all devices on the device list to the removed notifier to remove
1597 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 */
1599
1600int unregister_netdevice_notifier(struct notifier_block *nb)
1601{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001602 struct net_device *dev;
1603 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001604 int err;
1605
1606 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001607 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001608 if (err)
1609 goto unlock;
1610
1611 for_each_net(net) {
1612 for_each_netdev(net, dev) {
1613 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001614 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1615 dev);
1616 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001617 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001618 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001619 }
1620 }
1621unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001622 rtnl_unlock();
1623 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001625EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626
1627/**
Jiri Pirko351638e2013-05-28 01:30:21 +00001628 * call_netdevice_notifiers_info - call all network notifier blocks
1629 * @val: value passed unmodified to notifier function
1630 * @dev: net_device pointer passed unmodified to notifier function
1631 * @info: notifier information data
1632 *
1633 * Call all network notifier blocks. Parameters and return value
1634 * are as for raw_notifier_call_chain().
1635 */
1636
stephen hemminger1d143d92013-12-29 14:01:29 -08001637static int call_netdevice_notifiers_info(unsigned long val,
1638 struct net_device *dev,
1639 struct netdev_notifier_info *info)
Jiri Pirko351638e2013-05-28 01:30:21 +00001640{
1641 ASSERT_RTNL();
1642 netdev_notifier_info_init(info, dev);
1643 return raw_notifier_call_chain(&netdev_chain, val, info);
1644}
Jiri Pirko351638e2013-05-28 01:30:21 +00001645
1646/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 * call_netdevice_notifiers - call all network notifier blocks
1648 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001649 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 *
1651 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001652 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 */
1654
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001655int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656{
Jiri Pirko351638e2013-05-28 01:30:21 +00001657 struct netdev_notifier_info info;
1658
1659 return call_netdevice_notifiers_info(val, dev, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001661EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662
Pablo Neira1cf519002015-05-13 18:19:37 +02001663#ifdef CONFIG_NET_INGRESS
Daniel Borkmann45771392015-04-10 23:07:54 +02001664static struct static_key ingress_needed __read_mostly;
1665
1666void net_inc_ingress_queue(void)
1667{
1668 static_key_slow_inc(&ingress_needed);
1669}
1670EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1671
1672void net_dec_ingress_queue(void)
1673{
1674 static_key_slow_dec(&ingress_needed);
1675}
1676EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1677#endif
1678
Daniel Borkmann1f211a12016-01-07 22:29:47 +01001679#ifdef CONFIG_NET_EGRESS
1680static struct static_key egress_needed __read_mostly;
1681
1682void net_inc_egress_queue(void)
1683{
1684 static_key_slow_inc(&egress_needed);
1685}
1686EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1687
1688void net_dec_egress_queue(void)
1689{
1690 static_key_slow_dec(&egress_needed);
1691}
1692EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1693#endif
1694
Ingo Molnarc5905af2012-02-24 08:31:31 +01001695static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001696#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001697/* We are not allowed to call static_key_slow_dec() from irq context
Eric Dumazetb90e5792011-11-28 11:16:50 +00001698 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001699 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001700 */
1701static atomic_t netstamp_needed_deferred;
1702#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
1704void net_enable_timestamp(void)
1705{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001706#ifdef HAVE_JUMP_LABEL
1707 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1708
1709 if (deferred) {
1710 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001711 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001712 return;
1713 }
1714#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001715 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001717EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
1719void net_disable_timestamp(void)
1720{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001721#ifdef HAVE_JUMP_LABEL
1722 if (in_interrupt()) {
1723 atomic_inc(&netstamp_needed_deferred);
1724 return;
1725 }
1726#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001727 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001729EXPORT_SYMBOL(net_disable_timestamp);
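
/* Illustrative sketch (hypothetical tap-style user): packet taps that need
 * timestamps bump the netstamp static key while they are open, so the
 * RX/TX fast paths only pay for timestamping when someone is listening.
 */
static void example_tap_open(void)
{
	net_enable_timestamp();
}

static void example_tap_close(void)
{
	net_disable_timestamp();	/* deferred if called from irq context */
}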
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730
Eric Dumazet3b098e22010-05-15 23:57:10 -07001731static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732{
Eric Dumazet588f0332011-11-15 04:12:55 +00001733 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001734 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001735 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736}
1737
Eric Dumazet588f0332011-11-15 04:12:55 +00001738#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001739 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001740 if ((COND) && !(SKB)->tstamp.tv64) \
1741 __net_timestamp(SKB); \
1742 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001743
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04001744bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001745{
1746 unsigned int len;
1747
1748 if (!(dev->flags & IFF_UP))
1749 return false;
1750
1751 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1752 if (skb->len <= len)
1753 return true;
1754
1755 /* if TSO is enabled, we don't care about the length as the packet
1756	 * could be forwarded without being segmented first
1757 */
1758 if (skb_is_gso(skb))
1759 return true;
1760
1761 return false;
1762}
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04001763EXPORT_SYMBOL_GPL(is_skb_forwardable);
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001764
Herbert Xua0265d22014-04-17 13:45:03 +08001765int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1766{
Willem de Bruijnbbbf2df2015-06-08 11:53:08 -04001767 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
1768 unlikely(!is_skb_forwardable(dev, skb))) {
Herbert Xua0265d22014-04-17 13:45:03 +08001769 atomic_long_inc(&dev->rx_dropped);
1770 kfree_skb(skb);
1771 return NET_RX_DROP;
1772 }
1773
1774 skb_scrub_packet(skb, true);
WANG Cong08b4b8e2015-03-20 14:29:09 -07001775 skb->priority = 0;
Herbert Xua0265d22014-04-17 13:45:03 +08001776 skb->protocol = eth_type_trans(skb, dev);
Jay Vosburgh2c26d342014-12-19 15:32:00 -08001777 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
Herbert Xua0265d22014-04-17 13:45:03 +08001778
1779 return 0;
1780}
1781EXPORT_SYMBOL_GPL(__dev_forward_skb);
1782
Arnd Bergmann44540962009-11-26 06:07:08 +00001783/**
1784 * dev_forward_skb - loopback an skb to another netif
1785 *
1786 * @dev: destination network device
1787 * @skb: buffer to forward
1788 *
1789 * return values:
1790 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001791 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001792 *
1793 * dev_forward_skb can be used for injecting an skb from the
1794 * start_xmit function of one device into the receive queue
1795 * of another device.
1796 *
1797 * The receiving device may be in another namespace, so
1798 * we have to clear all information in the skb that could
1799 * impact namespace isolation.
1800 */
1801int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1802{
Herbert Xua0265d22014-04-17 13:45:03 +08001803 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001804}
1805EXPORT_SYMBOL_GPL(dev_forward_skb);
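
/* Illustrative sketch (in the spirit of veth-like drivers, names assumed):
 * an ndo_start_xmit that loops every frame into a peer device's receive
 * path via dev_forward_skb(), which consumes the skb in both the success
 * and the drop case.
 */
struct example_pair_priv {
	struct net_device *peer;	/* hypothetical peer device */
};

static netdev_tx_t example_pair_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct example_pair_priv *priv = netdev_priv(dev);

	if (dev_forward_skb(priv->peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}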
1806
Changli Gao71d9dec2010-12-15 19:57:25 +00001807static inline int deliver_skb(struct sk_buff *skb,
1808 struct packet_type *pt_prev,
1809 struct net_device *orig_dev)
1810{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001811 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1812 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001813 atomic_inc(&skb->users);
1814 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1815}
1816
Salam Noureddine7866a622015-01-27 11:35:48 -08001817static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1818 struct packet_type **pt,
Jiri Pirkofbcb2172015-03-30 16:56:01 +02001819 struct net_device *orig_dev,
1820 __be16 type,
Salam Noureddine7866a622015-01-27 11:35:48 -08001821 struct list_head *ptype_list)
1822{
1823 struct packet_type *ptype, *pt_prev = *pt;
1824
1825 list_for_each_entry_rcu(ptype, ptype_list, list) {
1826 if (ptype->type != type)
1827 continue;
1828 if (pt_prev)
Jiri Pirkofbcb2172015-03-30 16:56:01 +02001829 deliver_skb(skb, pt_prev, orig_dev);
Salam Noureddine7866a622015-01-27 11:35:48 -08001830 pt_prev = ptype;
1831 }
1832 *pt = pt_prev;
1833}
1834
Eric Leblondc0de08d2012-08-16 22:02:58 +00001835static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1836{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001837 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001838 return false;
1839
1840 if (ptype->id_match)
1841 return ptype->id_match(ptype, skb->sk);
1842 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1843 return true;
1844
1845 return false;
1846}
1847
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848/*
1849 * Support routine. Sends outgoing frames to any network
1850 * taps currently in use.
1851 */
1852
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001853static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854{
1855 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001856 struct sk_buff *skb2 = NULL;
1857 struct packet_type *pt_prev = NULL;
Salam Noureddine7866a622015-01-27 11:35:48 -08001858 struct list_head *ptype_list = &ptype_all;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001859
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 rcu_read_lock();
Salam Noureddine7866a622015-01-27 11:35:48 -08001861again:
1862 list_for_each_entry_rcu(ptype, ptype_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 /* Never send packets back to the socket
1864 * they originated from - MvS (miquels@drinkel.ow.org)
1865 */
Salam Noureddine7866a622015-01-27 11:35:48 -08001866 if (skb_loop_sk(ptype, skb))
1867 continue;
Changli Gao71d9dec2010-12-15 19:57:25 +00001868
Salam Noureddine7866a622015-01-27 11:35:48 -08001869 if (pt_prev) {
1870 deliver_skb(skb2, pt_prev, skb->dev);
Changli Gao71d9dec2010-12-15 19:57:25 +00001871 pt_prev = ptype;
Salam Noureddine7866a622015-01-27 11:35:48 -08001872 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 }
Salam Noureddine7866a622015-01-27 11:35:48 -08001874
1875 /* need to clone skb, done only once */
1876 skb2 = skb_clone(skb, GFP_ATOMIC);
1877 if (!skb2)
1878 goto out_unlock;
1879
1880 net_timestamp_set(skb2);
1881
1882 /* skb->nh should be correctly
1883 * set by sender, so that the second statement is
1884 * just protection against buggy protocols.
1885 */
1886 skb_reset_mac_header(skb2);
1887
1888 if (skb_network_header(skb2) < skb2->data ||
1889 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1890 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1891 ntohs(skb2->protocol),
1892 dev->name);
1893 skb_reset_network_header(skb2);
1894 }
1895
1896 skb2->transport_header = skb2->network_header;
1897 skb2->pkt_type = PACKET_OUTGOING;
1898 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 }
Salam Noureddine7866a622015-01-27 11:35:48 -08001900
1901 if (ptype_list == &ptype_all) {
1902 ptype_list = &dev->ptype_all;
1903 goto again;
1904 }
1905out_unlock:
Changli Gao71d9dec2010-12-15 19:57:25 +00001906 if (pt_prev)
1907 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 rcu_read_unlock();
1909}
1910
Ben Hutchings2c530402012-07-10 10:55:09 +00001911/**
1912 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001913 * @dev: Network device
1914 * @txq: number of queues available
1915 *
1916 * If real_num_tx_queues is changed, the tc mappings may no longer be
1917 * valid. To resolve this, verify that each tc mapping remains valid and,
1918 * if not, zero the mapping. With no priorities mapping to an
1919 * offset/count pair, that pair will no longer be used. In the worst
1920 * case, if TC0 is invalid, nothing can be done, so priority mappings
1921 * are disabled entirely. It is expected that drivers will fix this
1922 * mapping if they can before calling netif_set_real_num_tx_queues.
1923 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001924static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001925{
1926 int i;
1927 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1928
1929 /* If TC0 is invalidated disable TC mapping */
1930 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001931 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001932 dev->num_tc = 0;
1933 return;
1934 }
1935
1936 /* Invalidated prio to tc mappings set to TC0 */
1937 for (i = 1; i < TC_BITMASK + 1; i++) {
1938 int q = netdev_get_prio_tc_map(dev, i);
1939
1940 tc = &dev->tc_to_txq[q];
1941 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001942 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1943 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001944 netdev_set_prio_tc_map(dev, i, 0);
1945 }
1946 }
1947}
1948
Alexander Duyck537c00d2013-01-10 08:57:02 +00001949#ifdef CONFIG_XPS
1950static DEFINE_MUTEX(xps_map_mutex);
1951#define xmap_dereference(P) \
1952 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1953
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001954static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1955 int cpu, u16 index)
1956{
1957 struct xps_map *map = NULL;
1958 int pos;
1959
1960 if (dev_maps)
1961 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1962
1963 for (pos = 0; map && pos < map->len; pos++) {
1964 if (map->queues[pos] == index) {
1965 if (map->len > 1) {
1966 map->queues[pos] = map->queues[--map->len];
1967 } else {
1968 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1969 kfree_rcu(map, rcu);
1970 map = NULL;
1971 }
1972 break;
1973 }
1974 }
1975
1976 return map;
1977}
1978
Alexander Duyck024e9672013-01-10 08:57:46 +00001979static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001980{
1981 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00001982 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001983 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001984
1985 mutex_lock(&xps_map_mutex);
1986 dev_maps = xmap_dereference(dev->xps_maps);
1987
1988 if (!dev_maps)
1989 goto out_no_maps;
1990
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001991 for_each_possible_cpu(cpu) {
Alexander Duyck024e9672013-01-10 08:57:46 +00001992 for (i = index; i < dev->num_tx_queues; i++) {
1993 if (!remove_xps_queue(dev_maps, cpu, i))
1994 break;
1995 }
1996 if (i == dev->num_tx_queues)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001997 active = true;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001998 }
1999
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002000 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00002001 RCU_INIT_POINTER(dev->xps_maps, NULL);
2002 kfree_rcu(dev_maps, rcu);
2003 }
2004
Alexander Duyck024e9672013-01-10 08:57:46 +00002005 for (i = index; i < dev->num_tx_queues; i++)
2006 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
2007 NUMA_NO_NODE);
2008
Alexander Duyck537c00d2013-01-10 08:57:02 +00002009out_no_maps:
2010 mutex_unlock(&xps_map_mutex);
2011}
2012
Alexander Duyck01c5f862013-01-10 08:57:35 +00002013static struct xps_map *expand_xps_map(struct xps_map *map,
2014 int cpu, u16 index)
2015{
2016 struct xps_map *new_map;
2017 int alloc_len = XPS_MIN_MAP_ALLOC;
2018 int i, pos;
2019
2020 for (pos = 0; map && pos < map->len; pos++) {
2021 if (map->queues[pos] != index)
2022 continue;
2023 return map;
2024 }
2025
2026 /* Need to add queue to this CPU's existing map */
2027 if (map) {
2028 if (pos < map->alloc_len)
2029 return map;
2030
2031 alloc_len = map->alloc_len * 2;
2032 }
2033
2034	/* Need to allocate a new map to store the queue for this CPU */
2035 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2036 cpu_to_node(cpu));
2037 if (!new_map)
2038 return NULL;
2039
2040 for (i = 0; i < pos; i++)
2041 new_map->queues[i] = map->queues[i];
2042 new_map->alloc_len = alloc_len;
2043 new_map->len = pos;
2044
2045 return new_map;
2046}
2047
Michael S. Tsirkin35735402013-10-02 09:14:06 +03002048int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2049 u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00002050{
Alexander Duyck01c5f862013-01-10 08:57:35 +00002051 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002052 struct xps_map *map, *new_map;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002053 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002054 int cpu, numa_node_id = -2;
2055 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002056
2057 mutex_lock(&xps_map_mutex);
2058
2059 dev_maps = xmap_dereference(dev->xps_maps);
2060
Alexander Duyck01c5f862013-01-10 08:57:35 +00002061 /* allocate memory for queue storage */
2062 for_each_online_cpu(cpu) {
2063 if (!cpumask_test_cpu(cpu, mask))
2064 continue;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002065
Alexander Duyck01c5f862013-01-10 08:57:35 +00002066 if (!new_dev_maps)
2067 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002068 if (!new_dev_maps) {
2069 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002070 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002071 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002072
2073 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2074 NULL;
2075
2076 map = expand_xps_map(map, cpu, index);
2077 if (!map)
2078 goto error;
2079
2080 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2081 }
2082
2083 if (!new_dev_maps)
2084 goto out_no_new_maps;
2085
2086 for_each_possible_cpu(cpu) {
2087 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2088 /* add queue to CPU maps */
2089 int pos = 0;
2090
2091 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2092 while ((pos < map->len) && (map->queues[pos] != index))
2093 pos++;
2094
2095 if (pos == map->len)
2096 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002097#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00002098 if (numa_node_id == -2)
2099 numa_node_id = cpu_to_node(cpu);
2100 else if (numa_node_id != cpu_to_node(cpu))
2101 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002102#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00002103 } else if (dev_maps) {
2104 /* fill in the new device map from the old device map */
2105 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2106 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002107 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002108
Alexander Duyck537c00d2013-01-10 08:57:02 +00002109 }
2110
Alexander Duyck01c5f862013-01-10 08:57:35 +00002111 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2112
Alexander Duyck537c00d2013-01-10 08:57:02 +00002113 /* Cleanup old maps */
Alexander Duyck01c5f862013-01-10 08:57:35 +00002114 if (dev_maps) {
2115 for_each_possible_cpu(cpu) {
2116 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2117 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2118 if (map && map != new_map)
2119 kfree_rcu(map, rcu);
2120 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002121
Alexander Duyck537c00d2013-01-10 08:57:02 +00002122 kfree_rcu(dev_maps, rcu);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002123 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002124
Alexander Duyck01c5f862013-01-10 08:57:35 +00002125 dev_maps = new_dev_maps;
2126 active = true;
2127
2128out_no_new_maps:
2129 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00002130 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2131 (numa_node_id >= 0) ? numa_node_id :
2132 NUMA_NO_NODE);
2133
Alexander Duyck01c5f862013-01-10 08:57:35 +00002134 if (!dev_maps)
2135 goto out_no_maps;
2136
2137 /* removes queue from unused CPUs */
2138 for_each_possible_cpu(cpu) {
2139 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2140 continue;
2141
2142 if (remove_xps_queue(dev_maps, cpu, index))
2143 active = true;
2144 }
2145
2146 /* free map if not active */
2147 if (!active) {
2148 RCU_INIT_POINTER(dev->xps_maps, NULL);
2149 kfree_rcu(dev_maps, rcu);
2150 }
2151
2152out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00002153 mutex_unlock(&xps_map_mutex);
2154
2155 return 0;
2156error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002157 /* remove any maps that we added */
2158 for_each_possible_cpu(cpu) {
2159 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2160 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2161 NULL;
2162 if (new_map && new_map != map)
2163 kfree(new_map);
2164 }
2165
Alexander Duyck537c00d2013-01-10 08:57:02 +00002166 mutex_unlock(&xps_map_mutex);
2167
Alexander Duyck537c00d2013-01-10 08:57:02 +00002168 kfree(new_dev_maps);
2169 return -ENOMEM;
2170}
2171EXPORT_SYMBOL(netif_set_xps_queue);
2172
2173#endif
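
/* Illustrative sketch (assumed driver code): pinning transmit queue "qid"
 * to a single CPU via XPS. Without CONFIG_XPS, netif_set_xps_queue() is a
 * stub that returns 0.
 */
static int example_pin_txq_to_cpu(struct net_device *dev, u16 qid, int cpu)
{
	cpumask_var_t mask;
	int err;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, mask);
	err = netif_set_xps_queue(dev, mask, qid);
	free_cpumask_var(mask);
	return err;
}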
John Fastabendf0796d52010-07-01 13:21:57 +00002174/*
2175 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2176 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2177 */
Tom Herberte6484932010-10-18 18:04:39 +00002178int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002179{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002180 int rc;
2181
Tom Herberte6484932010-10-18 18:04:39 +00002182 if (txq < 1 || txq > dev->num_tx_queues)
2183 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002184
Ben Hutchings5c565802011-02-15 19:39:21 +00002185 if (dev->reg_state == NETREG_REGISTERED ||
2186 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002187 ASSERT_RTNL();
2188
Tom Herbert1d24eb42010-11-21 13:17:27 +00002189 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2190 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002191 if (rc)
2192 return rc;
2193
John Fastabend4f57c082011-01-17 08:06:04 +00002194 if (dev->num_tc)
2195 netif_setup_tc(dev, txq);
2196
Alexander Duyck024e9672013-01-10 08:57:46 +00002197 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002198 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002199#ifdef CONFIG_XPS
2200 netif_reset_xps_queues_gt(dev, txq);
2201#endif
2202 }
John Fastabendf0796d52010-07-01 13:21:57 +00002203 }
Tom Herberte6484932010-10-18 18:04:39 +00002204
2205 dev->real_num_tx_queues = txq;
2206 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002207}
2208EXPORT_SYMBOL(netif_set_real_num_tx_queues);
Denis Vlasenko56079432006-03-29 15:57:29 -08002209
Michael Daltona953be52014-01-16 22:23:28 -08002210#ifdef CONFIG_SYSFS
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002211/**
2212 * netif_set_real_num_rx_queues - set actual number of RX queues used
2213 * @dev: Network device
2214 * @rxq: Actual number of RX queues
2215 *
2216 * This must be called either with the rtnl_lock held or before
2217 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002218 * negative error code. If called before registration, it always
2219 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002220 */
2221int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2222{
2223 int rc;
2224
Tom Herbertbd25fa72010-10-18 18:00:16 +00002225 if (rxq < 1 || rxq > dev->num_rx_queues)
2226 return -EINVAL;
2227
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002228 if (dev->reg_state == NETREG_REGISTERED) {
2229 ASSERT_RTNL();
2230
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002231 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2232 rxq);
2233 if (rc)
2234 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002235 }
2236
2237 dev->real_num_rx_queues = rxq;
2238 return 0;
2239}
2240EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2241#endif
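
/* Illustrative sketch (assumed probe-time code): trimming the number of
 * active queues to the online CPU count. Before register_netdev() these
 * calls always succeed; after registration they require the RTNL lock.
 */
static int example_trim_queues(struct net_device *dev)
{
	unsigned int n = min_t(unsigned int, dev->num_tx_queues,
			       num_online_cpus());
	int err;

	err = netif_set_real_num_tx_queues(dev, n);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(dev,
			min_t(unsigned int, dev->num_rx_queues, n));
}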
2242
Ben Hutchings2c530402012-07-10 10:55:09 +00002243/**
2244 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002245 *
2246 * This routine should set an upper limit on the number of RSS queues
2247 * used by default by multiqueue devices.
2248 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002249int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002250{
2251 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2252}
2253EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2254
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002255static inline void __netif_reschedule(struct Qdisc *q)
2256{
2257 struct softnet_data *sd;
2258 unsigned long flags;
2259
2260 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05002261 sd = this_cpu_ptr(&softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002262 q->next_sched = NULL;
2263 *sd->output_queue_tailp = q;
2264 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002265 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2266 local_irq_restore(flags);
2267}
2268
David S. Miller37437bb2008-07-16 02:15:04 -07002269void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002270{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002271 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2272 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002273}
2274EXPORT_SYMBOL(__netif_schedule);
2275
Eric Dumazete6247022013-12-05 04:45:08 -08002276struct dev_kfree_skb_cb {
2277 enum skb_free_reason reason;
2278};
2279
2280static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002281{
Eric Dumazete6247022013-12-05 04:45:08 -08002282 return (struct dev_kfree_skb_cb *)skb->cb;
Denis Vlasenko56079432006-03-29 15:57:29 -08002283}
Denis Vlasenko56079432006-03-29 15:57:29 -08002284
John Fastabend46e5da42014-09-12 20:04:52 -07002285void netif_schedule_queue(struct netdev_queue *txq)
2286{
2287 rcu_read_lock();
2288 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2289 struct Qdisc *q = rcu_dereference(txq->qdisc);
2290
2291 __netif_schedule(q);
2292 }
2293 rcu_read_unlock();
2294}
2295EXPORT_SYMBOL(netif_schedule_queue);
2296
2297/**
2298 * netif_wake_subqueue - allow sending packets on subqueue
2299 * @dev: network device
2300 * @queue_index: sub queue index
2301 *
2302 * Resume individual transmit queue of a device with multiple transmit queues.
2303 */
2304void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2305{
2306 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2307
2308 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2309 struct Qdisc *q;
2310
2311 rcu_read_lock();
2312 q = rcu_dereference(txq->qdisc);
2313 __netif_schedule(q);
2314 rcu_read_unlock();
2315 }
2316}
2317EXPORT_SYMBOL(netif_wake_subqueue);
2318
2319void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2320{
2321 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2322 struct Qdisc *q;
2323
2324 rcu_read_lock();
2325 q = rcu_dereference(dev_queue->qdisc);
2326 __netif_schedule(q);
2327 rcu_read_unlock();
2328 }
2329}
2330EXPORT_SYMBOL(netif_tx_wake_queue);
2331
Eric Dumazete6247022013-12-05 04:45:08 -08002332void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2333{
2334 unsigned long flags;
2335
2336 if (likely(atomic_read(&skb->users) == 1)) {
2337 smp_rmb();
2338 atomic_set(&skb->users, 0);
2339 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2340 return;
2341 }
2342 get_kfree_skb_cb(skb)->reason = reason;
2343 local_irq_save(flags);
2344 skb->next = __this_cpu_read(softnet_data.completion_queue);
2345 __this_cpu_write(softnet_data.completion_queue, skb);
2346 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2347 local_irq_restore(flags);
2348}
2349EXPORT_SYMBOL(__dev_kfree_skb_irq);
2350
2351void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
Denis Vlasenko56079432006-03-29 15:57:29 -08002352{
2353 if (in_irq() || irqs_disabled())
Eric Dumazete6247022013-12-05 04:45:08 -08002354 __dev_kfree_skb_irq(skb, reason);
Denis Vlasenko56079432006-03-29 15:57:29 -08002355 else
2356 dev_kfree_skb(skb);
2357}
Eric Dumazete6247022013-12-05 04:45:08 -08002358EXPORT_SYMBOL(__dev_kfree_skb_any);
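
/* Illustrative sketch: freeing skbs from TX-completion code that may run
 * in hardirq, softirq or process context. dev_consume_skb_any() and
 * dev_kfree_skb_any() wrap __dev_kfree_skb_any() with the CONSUMED and
 * DROPPED reasons, so drop monitors only see real drops.
 */
static void example_tx_complete(struct sk_buff *skb, bool transmitted)
{
	if (transmitted)
		dev_consume_skb_any(skb);	/* normal completion */
	else
		dev_kfree_skb_any(skb);		/* counted as a drop */
}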
Denis Vlasenko56079432006-03-29 15:57:29 -08002359
2360
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002361/**
2362 * netif_device_detach - mark device as removed
2363 * @dev: network device
2364 *
2365 * Mark the device as removed from the system and therefore no longer available.
2366 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002367void netif_device_detach(struct net_device *dev)
2368{
2369 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2370 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002371 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002372 }
2373}
2374EXPORT_SYMBOL(netif_device_detach);
2375
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002376/**
2377 * netif_device_attach - mark device as attached
2378 * @dev: network device
2379 *
2380 * Mark the device as attached to the system and restart it if needed.
2381 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002382void netif_device_attach(struct net_device *dev)
2383{
2384 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2385 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002386 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002387 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002388 }
2389}
2390EXPORT_SYMBOL(netif_device_attach);
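
/* Illustrative sketch (a common driver suspend/resume pattern, names
 * assumed): netif_device_detach() marks the device absent so the stack
 * stops using it; netif_device_attach() restores it and, if the device
 * was running, wakes the TX queues and rearms the watchdog.
 */
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stops all TX queues if running */
	/* ... place the hardware in a low-power state ... */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	/* ... reinitialize the hardware ... */
	netif_device_attach(dev);
	return 0;
}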
2391
Jiri Pirko5605c762015-05-12 14:56:12 +02002392/*
2393 * Returns a Tx hash based on the given packet descriptor and the number
2394 * of Tx queues to be used as a distribution range.
2395 */
2396u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2397 unsigned int num_tx_queues)
2398{
2399 u32 hash;
2400 u16 qoffset = 0;
2401 u16 qcount = num_tx_queues;
2402
2403 if (skb_rx_queue_recorded(skb)) {
2404 hash = skb_get_rx_queue(skb);
2405 while (unlikely(hash >= num_tx_queues))
2406 hash -= num_tx_queues;
2407 return hash;
2408 }
2409
2410 if (dev->num_tc) {
2411 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2412 qoffset = dev->tc_to_txq[tc].offset;
2413 qcount = dev->tc_to_txq[tc].count;
2414 }
2415
2416 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2417}
2418EXPORT_SYMBOL(__skb_tx_hash);
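
/* Illustrative sketch (hypothetical ndo_select_queue, using the callback
 * signature of this kernel): spreading flows across the currently active
 * TX queues with the same hash the core would use.
 */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv,
				select_queue_fallback_t fallback)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}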
2419
Ben Hutchings36c92472012-01-17 07:57:56 +00002420static void skb_warn_bad_offload(const struct sk_buff *skb)
2421{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002422 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002423 struct net_device *dev = skb->dev;
Bjørn Mork88ad4172015-11-16 19:16:40 +01002424 const char *name = "";
Ben Hutchings36c92472012-01-17 07:57:56 +00002425
Ben Greearc846ad92013-04-19 10:45:52 +00002426 if (!net_ratelimit())
2427 return;
2428
Bjørn Mork88ad4172015-11-16 19:16:40 +01002429 if (dev) {
2430 if (dev->dev.parent)
2431 name = dev_driver_string(dev->dev.parent);
2432 else
2433 name = netdev_name(dev);
2434 }
Ben Hutchings36c92472012-01-17 07:57:56 +00002435 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2436 "gso_type=%d ip_summed=%d\n",
Bjørn Mork88ad4172015-11-16 19:16:40 +01002437 name, dev ? &dev->features : &null_features,
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002438 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002439 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2440 skb_shinfo(skb)->gso_type, skb->ip_summed);
2441}
2442
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443/*
2444 * Invalidate hardware checksum when packet is to be mangled, and
2445 * complete the checksum manually on the outgoing path.
2446 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002447int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448{
Al Virod3bc23e2006-11-14 21:24:49 -08002449 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002450 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451
Patrick McHardy84fa7932006-08-29 16:44:56 -07002452 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002453 goto out_set_summed;
2454
2455 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002456 skb_warn_bad_offload(skb);
2457 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 }
2459
Eric Dumazetcef401d2013-01-25 20:34:37 +00002460 /* Before computing a checksum, we should make sure no frag could
2461	 * be modified by an external entity: the checksum could be wrong.
2462 */
2463 if (skb_has_shared_frag(skb)) {
2464 ret = __skb_linearize(skb);
2465 if (ret)
2466 goto out;
2467 }
2468
Michał Mirosław55508d62010-12-14 15:24:08 +00002469 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002470 BUG_ON(offset >= skb_headlen(skb));
2471 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2472
2473 offset += skb->csum_offset;
2474 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2475
2476 if (skb_cloned(skb) &&
2477 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2479 if (ret)
2480 goto out;
2481 }
2482
Herbert Xua0308472007-10-15 01:47:15 -07002483 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002484out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002486out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 return ret;
2488}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002489EXPORT_SYMBOL(skb_checksum_help);
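
/* Illustrative sketch (hypothetical ndo_start_xmit fragment): a device
 * without checksum offload resolving CHECKSUM_PARTIAL in software before
 * handing the frame to the hardware.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* ... DMA-map and transmit the fully checksummed skb ... */
	return NETDEV_TX_OK;
}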
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490
Tom Herbert6ae23ad2015-12-14 11:19:46 -08002491/* __skb_csum_offload_chk - Driver helper function to determine if a device
2492 * with limited checksum offload capabilities is able to offload the checksum
2493 * for a given packet.
2494 *
2495 * Arguments:
2496 * skb - sk_buff for the packet in question
2497 * spec - contains the description of what the device can offload
2498 * csum_encapped - returns true if the checksum being offloaded is
2499 * encapsulated, i.e. it is the checksum for the transport header
2500 * in the inner headers.
2501 * checksum_help - when set, indicates that the helper function should
2502 * call skb_checksum_help if offload checks fail
2503 *
2504 * Returns:
2505 * true: Packet has passed the checksum checks and should be offloadable to
2506 * the device (a driver may still need to check for additional
2507 * restrictions of its device)
2508 * false: Checksum is not offloadable. If checksum_help was set then
2509 * skb_checksum_help was called to resolve checksum for non-GSO
2510 * packets and when IP protocol is not SCTP
2511 */
2512bool __skb_csum_offload_chk(struct sk_buff *skb,
2513 const struct skb_csum_offl_spec *spec,
2514 bool *csum_encapped,
2515 bool csum_help)
2516{
2517 struct iphdr *iph;
2518 struct ipv6hdr *ipv6;
2519 void *nhdr;
2520 int protocol;
2521 u8 ip_proto;
2522
2523 if (skb->protocol == htons(ETH_P_8021Q) ||
2524 skb->protocol == htons(ETH_P_8021AD)) {
2525 if (!spec->vlan_okay)
2526 goto need_help;
2527 }
2528
2529 /* We check whether the checksum refers to a transport layer checksum in
2530 * the outermost header or an encapsulated transport layer checksum that
2531 * corresponds to the inner headers of the skb. If the checksum is for
2532 * something else in the packet we need help.
2533 */
2534 if (skb_checksum_start_offset(skb) == skb_transport_offset(skb)) {
2535 /* Non-encapsulated checksum */
2536 protocol = eproto_to_ipproto(vlan_get_protocol(skb));
2537 nhdr = skb_network_header(skb);
2538 *csum_encapped = false;
2539 if (spec->no_not_encapped)
2540 goto need_help;
2541 } else if (skb->encapsulation && spec->encap_okay &&
2542 skb_checksum_start_offset(skb) ==
2543 skb_inner_transport_offset(skb)) {
2544 /* Encapsulated checksum */
2545 *csum_encapped = true;
2546 switch (skb->inner_protocol_type) {
2547 case ENCAP_TYPE_ETHER:
2548 protocol = eproto_to_ipproto(skb->inner_protocol);
2549 break;
2550 case ENCAP_TYPE_IPPROTO:
2551 protocol = skb->inner_protocol;
2552 break;
2553 }
2554 nhdr = skb_inner_network_header(skb);
2555 } else {
2556 goto need_help;
2557 }
2558
2559 switch (protocol) {
2560 case IPPROTO_IP:
2561 if (!spec->ipv4_okay)
2562 goto need_help;
2563 iph = nhdr;
2564 ip_proto = iph->protocol;
2565 if (iph->ihl != 5 && !spec->ip_options_okay)
2566 goto need_help;
2567 break;
2568 case IPPROTO_IPV6:
2569 if (!spec->ipv6_okay)
2570 goto need_help;
2571 if (spec->no_encapped_ipv6 && *csum_encapped)
2572 goto need_help;
2573 ipv6 = nhdr;
2574 nhdr += sizeof(*ipv6);
2575 ip_proto = ipv6->nexthdr;
2576 break;
2577 default:
2578 goto need_help;
2579 }
2580
2581ip_proto_again:
2582 switch (ip_proto) {
2583 case IPPROTO_TCP:
2584 if (!spec->tcp_okay ||
2585 skb->csum_offset != offsetof(struct tcphdr, check))
2586 goto need_help;
2587 break;
2588 case IPPROTO_UDP:
2589 if (!spec->udp_okay ||
2590 skb->csum_offset != offsetof(struct udphdr, check))
2591 goto need_help;
2592 break;
2593 case IPPROTO_SCTP:
2594 if (!spec->sctp_okay ||
2595 skb->csum_offset != offsetof(struct sctphdr, checksum))
2596 goto cant_help;
2597 break;
2598 case NEXTHDR_HOP:
2599 case NEXTHDR_ROUTING:
2600 case NEXTHDR_DEST: {
2601 u8 *opthdr = nhdr;
2602
2603 if (protocol != IPPROTO_IPV6 || !spec->ext_hdrs_okay)
2604 goto need_help;
2605
2606 ip_proto = opthdr[0];
2607 nhdr += (opthdr[1] + 1) << 3;
2608
2609 goto ip_proto_again;
2610 }
2611 default:
2612 goto need_help;
2613 }
2614
2615 /* Passed the tests for offloading checksum */
2616 return true;
2617
2618need_help:
2619 if (csum_help && !skb_shinfo(skb)->gso_size)
2620 skb_checksum_help(skb);
2621cant_help:
2622 return false;
2623}
2624EXPORT_SYMBOL(__skb_csum_offload_chk);
2625
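/*
 * Illustrative sketch: a hypothetical driver whose hardware checksums
 * only plain or VLAN-tagged TCP/UDP over IPv4/IPv6 could describe that
 * with a spec and let the helper arbitrate per packet; all "foo" names
 * are invented for the example, and the bitfield layout of
 * struct skb_csum_offl_spec is assumed from its use above.
 *
 *	static const struct skb_csum_offl_spec foo_csum_spec = {
 *		.ipv4_okay = 1,
 *		.ipv6_okay = 1,
 *		.vlan_okay = 1,
 *		.tcp_okay = 1,
 *		.udp_okay = 1,
 *	};
 *
 *	static bool foo_can_offload_csum(struct sk_buff *skb)
 *	{
 *		bool encapped;
 *
 *		return __skb_csum_offload_chk(skb, &foo_csum_spec,
 *					      &encapped, true);
 *	}
 *
 * Passing csum_help as true means that when false is returned, a
 * failing non-GSO packet has already been resolved in software by
 * skb_checksum_help().
 */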
Vlad Yasevich53d64712014-03-27 17:26:18 -04002626__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002627{
2628 __be16 type = skb->protocol;
2629
Pravin B Shelar19acc322013-05-07 20:41:07 +00002630 /* Tunnel gso handlers can set protocol to ethernet. */
2631 if (type == htons(ETH_P_TEB)) {
2632 struct ethhdr *eth;
2633
2634 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2635 return 0;
2636
2637 eth = (struct ethhdr *)skb_mac_header(skb);
2638 type = eth->h_proto;
2639 }
2640
Toshiaki Makitad4bcef32015-01-29 20:37:07 +09002641 return __vlan_get_protocol(skb, type, depth);
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002642}
2643
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002644/**
2645 * skb_mac_gso_segment - mac layer segmentation handler.
2646 * @skb: buffer to segment
2647 * @features: features for the output path (see dev->features)
2648 */
2649struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2650 netdev_features_t features)
2651{
2652 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2653 struct packet_offload *ptype;
Vlad Yasevich53d64712014-03-27 17:26:18 -04002654 int vlan_depth = skb->mac_len;
2655 __be16 type = skb_network_protocol(skb, &vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002656
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002657 if (unlikely(!type))
2658 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002659
Vlad Yasevich53d64712014-03-27 17:26:18 -04002660 __skb_pull(skb, vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002661
2662 rcu_read_lock();
2663 list_for_each_entry_rcu(ptype, &offload_base, list) {
2664 if (ptype->type == type && ptype->callbacks.gso_segment) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002665 segs = ptype->callbacks.gso_segment(skb, features);
2666 break;
2667 }
2668 }
2669 rcu_read_unlock();
2670
2671 __skb_push(skb, skb->data - skb_mac_header(skb));
2672
2673 return segs;
2674}
2675EXPORT_SYMBOL(skb_mac_gso_segment);
2676
2677
Cong Wang12b00042013-02-05 16:36:38 +00002678/* Open vSwitch calls this on the rx path, so we need a different check.
2679 */
2680static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2681{
2682 if (tx_path)
2683 return skb->ip_summed != CHECKSUM_PARTIAL;
2684 else
2685 return skb->ip_summed == CHECKSUM_NONE;
2686}
2687
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002688/**
Cong Wang12b00042013-02-05 16:36:38 +00002689 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002690 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002691 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002692 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002693 *
2694 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002695 *
2696 * It may return NULL if the skb requires no segmentation. This is
2697 * only possible when GSO is used for verifying header integrity.
Konstantin Khlebnikov9207f9d2016-01-08 15:21:46 +03002698 *
 2699 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of the previous skb->cb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002700 */
Cong Wang12b00042013-02-05 16:36:38 +00002701struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2702 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002703{
Cong Wang12b00042013-02-05 16:36:38 +00002704 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002705 int err;
2706
Ben Hutchings36c92472012-01-17 07:57:56 +00002707 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002708
françois romieua40e0a62014-07-15 23:55:35 +02002709 err = skb_cow_head(skb, 0);
2710 if (err < 0)
Herbert Xua430a432006-07-08 13:34:56 -07002711 return ERR_PTR(err);
2712 }
2713
Konstantin Khlebnikov9207f9d2016-01-08 15:21:46 +03002714 BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2715 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2716
Pravin B Shelar68c33162013-02-14 14:02:41 +00002717 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Eric Dumazet3347c962013-10-19 11:42:56 -07002718 SKB_GSO_CB(skb)->encap_level = 0;
2719
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002720 skb_reset_mac_header(skb);
2721 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002722
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002723 return skb_mac_gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002724}
Cong Wang12b00042013-02-05 16:36:38 +00002725EXPORT_SYMBOL(__skb_gso_segment);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002726
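/*
 * Illustrative sketch: consuming the segment list produced by
 * skb_gso_segment() (a thin wrapper around __skb_gso_segment() with
 * tx_path set).  foo_xmit_one() is a hypothetical per-segment transmit
 * helper; the structure mirrors validate_xmit_skb() below.
 *
 *	struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	if (segs) {
 *		consume_skb(skb);
 *		skb = segs;
 *	}
 *	while (skb) {
 *		struct sk_buff *next = skb->next;
 *
 *		skb->next = NULL;
 *		foo_xmit_one(skb);
 *		skb = next;
 *	}
 */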
Herbert Xufb286bb2005-11-10 13:01:24 -08002727/* Take action when hardware reception checksum errors are detected. */
2728#ifdef CONFIG_BUG
2729void netdev_rx_csum_fault(struct net_device *dev)
2730{
2731 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002732 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002733 dump_stack();
2734 }
2735}
2736EXPORT_SYMBOL(netdev_rx_csum_fault);
2737#endif
2738
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739/* Actually, we should eliminate this check as soon as we know that:
 2740 * 1. An IOMMU is present and allows mapping all the memory.
 2741 * 2. No high memory really exists on this machine.
2742 */
2743
Florian Westphalc1e756b2014-05-05 15:00:44 +02002744static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002746#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002748 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002749 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2750 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2751 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002752 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002753 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002754 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002756 if (PCI_DMA_BUS_IS_PHYS) {
2757 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758
Eric Dumazet9092c652010-04-02 13:34:49 -07002759 if (!pdev)
2760 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002761 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002762 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2763 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002764 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2765 return 1;
2766 }
2767 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002768#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 return 0;
2770}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771
Simon Horman3b392dd2014-06-04 08:53:17 +09002772/* For an MPLS offload request, verify we are testing hardware MPLS features
2773 * instead of standard features for the netdev.
2774 */
Pravin B Shelard0edc7b2014-12-23 16:20:11 -08002775#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
Simon Horman3b392dd2014-06-04 08:53:17 +09002776static netdev_features_t net_mpls_features(struct sk_buff *skb,
2777 netdev_features_t features,
2778 __be16 type)
2779{
Simon Horman25cd9ba2014-10-06 05:05:13 -07002780 if (eth_p_mpls(type))
Simon Horman3b392dd2014-06-04 08:53:17 +09002781 features &= skb->dev->mpls_features;
2782
2783 return features;
2784}
2785#else
2786static netdev_features_t net_mpls_features(struct sk_buff *skb,
2787 netdev_features_t features,
2788 __be16 type)
2789{
2790 return features;
2791}
2792#endif
2793
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002794static netdev_features_t harmonize_features(struct sk_buff *skb,
Florian Westphalc1e756b2014-05-05 15:00:44 +02002795 netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002796{
Vlad Yasevich53d64712014-03-27 17:26:18 -04002797 int tmp;
Simon Horman3b392dd2014-06-04 08:53:17 +09002798 __be16 type;
2799
2800 type = skb_network_protocol(skb, &tmp);
2801 features = net_mpls_features(skb, features, type);
Vlad Yasevich53d64712014-03-27 17:26:18 -04002802
Ed Cashinc0d680e2012-09-19 15:49:00 +00002803 if (skb->ip_summed != CHECKSUM_NONE &&
Simon Horman3b392dd2014-06-04 08:53:17 +09002804 !can_checksum_protocol(features, type)) {
Tom Herberta1882222015-12-14 11:19:43 -08002805 features &= ~NETIF_F_CSUM_MASK;
Florian Westphalc1e756b2014-05-05 15:00:44 +02002806 } else if (illegal_highdma(skb->dev, skb)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002807 features &= ~NETIF_F_SG;
2808 }
2809
2810 return features;
2811}
2812
Toshiaki Makitae38f3022015-03-27 14:31:13 +09002813netdev_features_t passthru_features_check(struct sk_buff *skb,
2814 struct net_device *dev,
2815 netdev_features_t features)
2816{
2817 return features;
2818}
2819EXPORT_SYMBOL(passthru_features_check);
2820
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002821static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2822 struct net_device *dev,
2823 netdev_features_t features)
2824{
2825 return vlan_features_check(skb, features);
2826}
2827
Florian Westphalc1e756b2014-05-05 15:00:44 +02002828netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002829{
Jesse Gross5f352272014-12-23 22:37:26 -08002830 struct net_device *dev = skb->dev;
Eric Dumazetfcbeb972014-10-05 10:11:27 -07002831 netdev_features_t features = dev->features;
2832 u16 gso_segs = skb_shinfo(skb)->gso_segs;
Jesse Gross58e998c2010-10-29 12:14:55 +00002833
Eric Dumazetfcbeb972014-10-05 10:11:27 -07002834 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
Ben Hutchings30b678d2012-07-30 15:57:00 +00002835 features &= ~NETIF_F_GSO_MASK;
2836
Jesse Gross5f352272014-12-23 22:37:26 -08002837 /* For an encapsulation offload request, verify we are testing
 2838 * hardware encapsulation features instead of standard
 2839 * features for the netdev.
2840 */
2841 if (skb->encapsulation)
2842 features &= dev->hw_enc_features;
2843
Toshiaki Makitaf5a7fb82015-03-27 14:31:11 +09002844 if (skb_vlan_tagged(skb))
2845 features = netdev_intersect_features(features,
2846 dev->vlan_features |
2847 NETIF_F_HW_VLAN_CTAG_TX |
2848 NETIF_F_HW_VLAN_STAG_TX);
Jesse Gross58e998c2010-10-29 12:14:55 +00002849
Jesse Gross5f352272014-12-23 22:37:26 -08002850 if (dev->netdev_ops->ndo_features_check)
2851 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2852 features);
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002853 else
2854 features &= dflt_features_check(skb, dev, features);
Jesse Gross5f352272014-12-23 22:37:26 -08002855
Florian Westphalc1e756b2014-05-05 15:00:44 +02002856 return harmonize_features(skb, features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002857}
Florian Westphalc1e756b2014-05-05 15:00:44 +02002858EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002859
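/*
 * Illustrative sketch: a transmit path queries the per-skb feature set
 * and then chooses between software segmentation, linearization, or
 * direct transmission, much as validate_xmit_skb() does below.
 * foo_software_segment() stands in for the skb_gso_segment() handling
 * shown there.
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features))
 *		skb = foo_software_segment(skb, features);
 *	else if (skb_needs_linearize(skb, features) && __skb_linearize(skb))
 *		goto drop;
 */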
David S. Miller2ea25512014-08-29 21:10:01 -07002860static int xmit_one(struct sk_buff *skb, struct net_device *dev,
David S. Miller95f6b3d2014-08-29 21:57:30 -07002861 struct netdev_queue *txq, bool more)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002862{
David S. Miller2ea25512014-08-29 21:10:01 -07002863 unsigned int len;
2864 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08002865
Salam Noureddine7866a622015-01-27 11:35:48 -08002866 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
David S. Miller2ea25512014-08-29 21:10:01 -07002867 dev_queue_xmit_nit(skb, dev);
Jesse Grossfc741212011-01-09 06:23:32 +00002868
David S. Miller2ea25512014-08-29 21:10:01 -07002869 len = skb->len;
2870 trace_net_dev_start_xmit(skb, dev);
David S. Miller95f6b3d2014-08-29 21:57:30 -07002871 rc = netdev_start_xmit(skb, dev, txq, more);
David S. Miller2ea25512014-08-29 21:10:01 -07002872 trace_net_dev_xmit(skb, rc, dev, len);
Eric Dumazetadf30902009-06-02 05:19:30 +00002873
Patrick McHardy572a9d72009-11-10 06:14:14 +00002874 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002875}
David S. Miller2ea25512014-08-29 21:10:01 -07002876
David S. Miller8dcda222014-09-01 15:06:40 -07002877struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2878 struct netdev_queue *txq, int *ret)
David S. Miller7f2e8702014-08-29 21:19:14 -07002879{
2880 struct sk_buff *skb = first;
2881 int rc = NETDEV_TX_OK;
2882
2883 while (skb) {
2884 struct sk_buff *next = skb->next;
2885
2886 skb->next = NULL;
David S. Miller95f6b3d2014-08-29 21:57:30 -07002887 rc = xmit_one(skb, dev, txq, next != NULL);
David S. Miller7f2e8702014-08-29 21:19:14 -07002888 if (unlikely(!dev_xmit_complete(rc))) {
2889 skb->next = next;
2890 goto out;
2891 }
2892
2893 skb = next;
2894 if (netif_xmit_stopped(txq) && skb) {
2895 rc = NETDEV_TX_BUSY;
2896 break;
2897 }
2898 }
2899
2900out:
2901 *ret = rc;
2902 return skb;
2903}
2904
Eric Dumazet1ff0dc92014-10-06 11:26:27 -07002905static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2906 netdev_features_t features)
David S. Millereae3f882014-08-30 15:17:13 -07002907{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002908 if (skb_vlan_tag_present(skb) &&
Jiri Pirko59682502014-11-19 14:04:59 +01002909 !vlan_hw_offload_capable(features, skb->vlan_proto))
2910 skb = __vlan_hwaccel_push_inside(skb);
David S. Millereae3f882014-08-30 15:17:13 -07002911 return skb;
2912}
2913
Eric Dumazet55a93b32014-10-03 15:31:07 -07002914static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
David S. Millereae3f882014-08-30 15:17:13 -07002915{
2916 netdev_features_t features;
2917
2918 if (skb->next)
2919 return skb;
2920
David S. Millereae3f882014-08-30 15:17:13 -07002921 features = netif_skb_features(skb);
2922 skb = validate_xmit_vlan(skb, features);
2923 if (unlikely(!skb))
2924 goto out_null;
2925
Johannes Berg8b86a612015-04-17 15:45:04 +02002926 if (netif_needs_gso(skb, features)) {
David S. Millerce937182014-08-30 19:22:20 -07002927 struct sk_buff *segs;
2928
2929 segs = skb_gso_segment(skb, features);
Jason Wangcecda692014-09-19 16:04:38 +08002930 if (IS_ERR(segs)) {
Jason Wangaf6dabc2014-12-19 11:09:13 +08002931 goto out_kfree_skb;
Jason Wangcecda692014-09-19 16:04:38 +08002932 } else if (segs) {
2933 consume_skb(skb);
2934 skb = segs;
2935 }
David S. Millereae3f882014-08-30 15:17:13 -07002936 } else {
2937 if (skb_needs_linearize(skb, features) &&
2938 __skb_linearize(skb))
2939 goto out_kfree_skb;
2940
2941 /* If packet is not checksummed and device does not
2942 * support checksumming for this protocol, complete
2943 * checksumming here.
2944 */
2945 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2946 if (skb->encapsulation)
2947 skb_set_inner_transport_header(skb,
2948 skb_checksum_start_offset(skb));
2949 else
2950 skb_set_transport_header(skb,
2951 skb_checksum_start_offset(skb));
Tom Herberta1882222015-12-14 11:19:43 -08002952 if (!(features & NETIF_F_CSUM_MASK) &&
David S. Millereae3f882014-08-30 15:17:13 -07002953 skb_checksum_help(skb))
2954 goto out_kfree_skb;
2955 }
2956 }
2957
2958 return skb;
2959
2960out_kfree_skb:
2961 kfree_skb(skb);
2962out_null:
2963 return NULL;
2964}
2965
Eric Dumazet55a93b32014-10-03 15:31:07 -07002966struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2967{
2968 struct sk_buff *next, *head = NULL, *tail;
2969
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07002970 for (; skb != NULL; skb = next) {
Eric Dumazet55a93b32014-10-03 15:31:07 -07002971 next = skb->next;
2972 skb->next = NULL;
Eric Dumazet55a93b32014-10-03 15:31:07 -07002973
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07002974 /* in case skb won't be segmented, point to itself */
2975 skb->prev = skb;
2976
2977 skb = validate_xmit_skb(skb, dev);
2978 if (!skb)
2979 continue;
2980
2981 if (!head)
2982 head = skb;
2983 else
2984 tail->next = skb;
2985 /* If skb was segmented, skb->prev points to
2986 * the last segment. If not, it still contains skb.
2987 */
2988 tail = skb->prev;
Eric Dumazet55a93b32014-10-03 15:31:07 -07002989 }
2990 return head;
2991}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002992
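/*
 * Illustrative sketch: bulk-dequeue paths validate a whole chain at
 * once and then hand it to dev_hard_start_xmit(); compare
 * sch_direct_xmit() in net/sched/sch_generic.c, which does roughly:
 *
 *	skb = validate_xmit_skb_list(skb, dev);
 *	if (skb) {
 *		HARD_TX_LOCK(dev, txq, smp_processor_id());
 *		if (!netif_xmit_frozen_or_stopped(txq))
 *			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 *		HARD_TX_UNLOCK(dev, txq);
 *	}
 */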
Eric Dumazet1def9232013-01-10 12:36:42 +00002993static void qdisc_pkt_len_init(struct sk_buff *skb)
2994{
2995 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2996
2997 qdisc_skb_cb(skb)->pkt_len = skb->len;
2998
2999 /* To get more precise estimation of bytes sent on wire,
3000 * we add to pkt_len the headers size of all segments
3001 */
3002 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08003003 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00003004 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00003005
Eric Dumazet757b8b12013-01-15 21:14:21 -08003006 /* mac layer + network layer */
3007 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3008
3009 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00003010 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3011 hdr_len += tcp_hdrlen(skb);
3012 else
3013 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00003014
3015 if (shinfo->gso_type & SKB_GSO_DODGY)
3016 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3017 shinfo->gso_size);
3018
3019 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00003020 }
3021}
3022
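/*
 * Worked example (illustrative): a TSO skb carrying three segments of
 * gso_size 1448 with 14B Ethernet + 20B IPv4 + 32B TCP headers has
 * hdr_len = 66 and skb->len = 66 + 3 * 1448 = 4410.  The code above
 * then yields pkt_len = 4410 + (3 - 1) * 66 = 4542, i.e. 3 * 1514
 * on-wire bytes, one full header per segment.
 */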
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003023static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3024 struct net_device *dev,
3025 struct netdev_queue *txq)
3026{
3027 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00003028 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003029 int rc;
3030
Eric Dumazeta2da5702011-01-20 03:48:19 +00003031 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003032 /*
3033 * Heuristic to force contended enqueues to serialize on a
 3034 * separate lock before trying to get the qdisc main lock.
Ying Xue9bf2b8c2014-06-26 15:56:31 +08003035 * This permits __QDISC___STATE_RUNNING owner to get the lock more
3036 * often and dequeue packets faster.
Eric Dumazet79640a42010-06-02 05:09:29 -07003037 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00003038 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003039 if (unlikely(contended))
3040 spin_lock(&q->busylock);
3041
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003042 spin_lock(root_lock);
3043 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3044 kfree_skb(skb);
3045 rc = NET_XMIT_DROP;
3046 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07003047 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003048 /*
3049 * This is a work-conserving queue; there are no old skbs
3050 * waiting to be sent out; and the qdisc is not running -
3051 * xmit the skb directly.
3052 */
Eric Dumazetbfe0d022011-01-09 08:30:54 +00003053
Eric Dumazetbfe0d022011-01-09 08:30:54 +00003054 qdisc_bstats_update(q, skb);
3055
Eric Dumazet55a93b32014-10-03 15:31:07 -07003056 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
Eric Dumazet79640a42010-06-02 05:09:29 -07003057 if (unlikely(contended)) {
3058 spin_unlock(&q->busylock);
3059 contended = false;
3060 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003061 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003062 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07003063 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003064
3065 rc = NET_XMIT_SUCCESS;
3066 } else {
Eric Dumazeta2da5702011-01-20 03:48:19 +00003067 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07003068 if (qdisc_run_begin(q)) {
3069 if (unlikely(contended)) {
3070 spin_unlock(&q->busylock);
3071 contended = false;
3072 }
3073 __qdisc_run(q);
3074 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003075 }
3076 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07003077 if (unlikely(contended))
3078 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003079 return rc;
3080}
3081
Daniel Borkmann86f85152013-12-29 17:27:11 +01003082#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Neil Horman5bc14212011-11-22 05:10:51 +00003083static void skb_update_prio(struct sk_buff *skb)
3084{
Igor Maravic6977a792011-11-25 07:44:54 +00003085 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00003086
Eric Dumazet91c68ce2012-07-08 21:45:10 +00003087 if (!skb->priority && skb->sk && map) {
Tejun Heo2a56a1f2015-12-07 17:38:52 -05003088 unsigned int prioidx =
3089 sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
Eric Dumazet91c68ce2012-07-08 21:45:10 +00003090
3091 if (prioidx < map->priomap_len)
3092 skb->priority = map->priomap[prioidx];
3093 }
Neil Horman5bc14212011-11-22 05:10:51 +00003094}
3095#else
3096#define skb_update_prio(skb)
3097#endif
3098
hannes@stressinduktion.orgf60e5992015-04-01 17:07:44 +02003099DEFINE_PER_CPU(int, xmit_recursion);
3100EXPORT_SYMBOL(xmit_recursion);
3101
David S. Miller11a766c2010-10-25 12:51:55 -07003102#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07003103
Dave Jonesd29f7492008-07-22 14:09:06 -07003104/**
Michel Machado95603e22012-06-12 10:16:35 +00003105 * dev_loopback_xmit - loop back @skb
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05003106 * @net: network namespace this loopback is happening in
3107 * @sk: sk needed to be a netfilter okfn
Michel Machado95603e22012-06-12 10:16:35 +00003108 * @skb: buffer to transmit
3109 */
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05003110int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
Michel Machado95603e22012-06-12 10:16:35 +00003111{
3112 skb_reset_mac_header(skb);
3113 __skb_pull(skb, skb_network_offset(skb));
3114 skb->pkt_type = PACKET_LOOPBACK;
3115 skb->ip_summed = CHECKSUM_UNNECESSARY;
3116 WARN_ON(!skb_dst(skb));
3117 skb_dst_force(skb);
3118 netif_rx_ni(skb);
3119 return 0;
3120}
3121EXPORT_SYMBOL(dev_loopback_xmit);
3122
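/*
 * Illustrative sketch: dev_loopback_xmit() is meant to be used as a
 * netfilter okfn, e.g. when looping a multicast copy back locally
 * (compare IPv4's ip_mc_output()):
 *
 *	NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 *		net, sk, newskb, NULL, newskb->dev,
 *		dev_loopback_xmit);
 */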
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003123#ifdef CONFIG_NET_EGRESS
3124static struct sk_buff *
3125sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3126{
3127 struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
3128 struct tcf_result cl_res;
3129
3130 if (!cl)
3131 return skb;
3132
3133 /* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set
3134 * earlier by the caller.
3135 */
3136 qdisc_bstats_cpu_update(cl->q, skb);
3137
3138 switch (tc_classify(skb, cl, &cl_res, false)) {
3139 case TC_ACT_OK:
3140 case TC_ACT_RECLASSIFY:
3141 skb->tc_index = TC_H_MIN(cl_res.classid);
3142 break;
3143 case TC_ACT_SHOT:
3144 qdisc_qstats_cpu_drop(cl->q);
3145 *ret = NET_XMIT_DROP;
3146 goto drop;
3147 case TC_ACT_STOLEN:
3148 case TC_ACT_QUEUED:
3149 *ret = NET_XMIT_SUCCESS;
3150drop:
3151 kfree_skb(skb);
3152 return NULL;
3153 case TC_ACT_REDIRECT:
3154 /* No need to push/pop skb's mac_header here on egress! */
3155 skb_do_redirect(skb);
3156 *ret = NET_XMIT_SUCCESS;
3157 return NULL;
3158 default:
3159 break;
3160 }
3161
3162 return skb;
3163}
3164#endif /* CONFIG_NET_EGRESS */
3165
Jiri Pirko638b2a62015-05-12 14:56:13 +02003166static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3167{
3168#ifdef CONFIG_XPS
3169 struct xps_dev_maps *dev_maps;
3170 struct xps_map *map;
3171 int queue_index = -1;
3172
3173 rcu_read_lock();
3174 dev_maps = rcu_dereference(dev->xps_maps);
3175 if (dev_maps) {
3176 map = rcu_dereference(
3177 dev_maps->cpu_map[skb->sender_cpu - 1]);
3178 if (map) {
3179 if (map->len == 1)
3180 queue_index = map->queues[0];
3181 else
3182 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
3183 map->len)];
3184 if (unlikely(queue_index >= dev->real_num_tx_queues))
3185 queue_index = -1;
3186 }
3187 }
3188 rcu_read_unlock();
3189
3190 return queue_index;
3191#else
3192 return -1;
3193#endif
3194}
3195
3196static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3197{
3198 struct sock *sk = skb->sk;
3199 int queue_index = sk_tx_queue_get(sk);
3200
3201 if (queue_index < 0 || skb->ooo_okay ||
3202 queue_index >= dev->real_num_tx_queues) {
3203 int new_index = get_xps_queue(dev, skb);
3204 if (new_index < 0)
3205 new_index = skb_tx_hash(dev, skb);
3206
3207 if (queue_index != new_index && sk &&
Eric Dumazet004a5d02015-10-04 21:08:10 -07003208 sk_fullsock(sk) &&
Jiri Pirko638b2a62015-05-12 14:56:13 +02003209 rcu_access_pointer(sk->sk_dst_cache))
3210 sk_tx_queue_set(sk, new_index);
3211
3212 queue_index = new_index;
3213 }
3214
3215 return queue_index;
3216}
3217
3218struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3219 struct sk_buff *skb,
3220 void *accel_priv)
3221{
3222 int queue_index = 0;
3223
3224#ifdef CONFIG_XPS
Eric Dumazet52bd2d62015-11-18 06:30:50 -08003225 u32 sender_cpu = skb->sender_cpu - 1;
3226
3227 if (sender_cpu >= (u32)NR_CPUS)
Jiri Pirko638b2a62015-05-12 14:56:13 +02003228 skb->sender_cpu = raw_smp_processor_id() + 1;
3229#endif
3230
3231 if (dev->real_num_tx_queues != 1) {
3232 const struct net_device_ops *ops = dev->netdev_ops;
3233 if (ops->ndo_select_queue)
3234 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3235 __netdev_pick_tx);
3236 else
3237 queue_index = __netdev_pick_tx(dev, skb);
3238
3239 if (!accel_priv)
3240 queue_index = netdev_cap_txqueue(dev, queue_index);
3241 }
3242
3243 skb_set_queue_mapping(skb, queue_index);
3244 return netdev_get_tx_queue(dev, queue_index);
3245}
3246
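/*
 * Illustrative sketch: a driver's ndo_select_queue() implementation can
 * steer selected traffic itself and defer everything else to the core's
 * pick (the fallback argument is __netdev_pick_tx here).  The
 * TC_PRIO_CONTROL special-casing is invented for the example.
 *
 *	static u16 foo_select_queue(struct net_device *dev,
 *				    struct sk_buff *skb, void *accel_priv,
 *				    select_queue_fallback_t fallback)
 *	{
 *		if (skb->priority == TC_PRIO_CONTROL)
 *			return dev->real_num_tx_queues - 1;
 *		return fallback(dev, skb);
 *	}
 */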
Michel Machado95603e22012-06-12 10:16:35 +00003247/**
Jason Wang9d08dd32014-01-20 11:25:13 +08003248 * __dev_queue_xmit - transmit a buffer
Dave Jonesd29f7492008-07-22 14:09:06 -07003249 * @skb: buffer to transmit
Jason Wang9d08dd32014-01-20 11:25:13 +08003250 * @accel_priv: private data used for L2 forwarding offload
Dave Jonesd29f7492008-07-22 14:09:06 -07003251 *
3252 * Queue a buffer for transmission to a network device. The caller must
3253 * have set the device and priority and built the buffer before calling
3254 * this function. The function can be called from an interrupt.
3255 *
3256 * A negative errno code is returned on a failure. A success does not
3257 * guarantee the frame will be transmitted as it may be dropped due
3258 * to congestion or traffic shaping.
3259 *
3260 * -----------------------------------------------------------------------------------
3261 * I notice this method can also return errors from the queue disciplines,
3262 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3263 * be positive.
3264 *
3265 * Regardless of the return value, the skb is consumed, so it is currently
3266 * difficult to retry a send to this method. (You can bump the ref count
3267 * before sending to hold a reference for retry if you are careful.)
3268 *
3269 * When calling this method, interrupts MUST be enabled. This is because
3270 * the BH enable code must have IRQs enabled so that it will not deadlock.
3271 * --BLG
3272 */
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05303273static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274{
3275 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07003276 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277 struct Qdisc *q;
3278 int rc = -ENOMEM;
3279
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00003280 skb_reset_mac_header(skb);
3281
Willem de Bruijne7fd2882014-08-04 22:11:48 -04003282 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3283 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3284
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003285 /* Disable soft irqs for various locks below. Also
3286 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003288 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289
Neil Horman5bc14212011-11-22 05:10:51 +00003290 skb_update_prio(skb);
3291
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003292 qdisc_pkt_len_init(skb);
3293#ifdef CONFIG_NET_CLS_ACT
3294 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
3295# ifdef CONFIG_NET_EGRESS
3296 if (static_key_false(&egress_needed)) {
3297 skb = sch_handle_egress(skb, &rc, dev);
3298 if (!skb)
3299 goto out;
3300 }
3301# endif
3302#endif
Eric Dumazet02875872014-10-05 18:38:35 -07003303 /* If the device/qdisc doesn't need skb->dst, release it right now while
 3304 * it's hot in this CPU's cache.
3305 */
3306 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3307 skb_dst_drop(skb);
3308 else
3309 skb_dst_force(skb);
3310
Scott Feldman0c4f6912015-07-18 18:24:48 -07003311#ifdef CONFIG_NET_SWITCHDEV
3312 /* Don't forward if offload device already forwarded */
3313 if (skb->offload_fwd_mark &&
3314 skb->offload_fwd_mark == dev->offload_fwd_mark) {
3315 consume_skb(skb);
3316 rc = NET_XMIT_SUCCESS;
3317 goto out;
3318 }
3319#endif
3320
Jason Wangf663dd92014-01-10 16:18:26 +08003321 txq = netdev_pick_tx(dev, skb, accel_priv);
Paul E. McKenneya898def2010-02-22 17:04:49 -08003322 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07003323
Koki Sanagicf66ba52010-08-23 18:45:02 +09003324 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003326 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07003327 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328 }
3329
3330 /* The device has no queue. Common case for software devices:
3331 loopback, all the sorts of tunnels...
3332
Herbert Xu932ff272006-06-09 12:20:56 -07003333 Really, it is unlikely that netif_tx_lock protection is necessary
 3334 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335 counters.)
 3336 However, it is possible that they rely on the protection
 3337 we provide here.
 3338
 3339 Check this and shoot the lock. It is not prone to deadlocks.
 3340 Either shoot the noqueue qdisc; it is even simpler 8)
3341 */
3342 if (dev->flags & IFF_UP) {
3343 int cpu = smp_processor_id(); /* ok because BHs are off */
3344
David S. Millerc773e842008-07-08 23:13:53 -07003345 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346
Eric Dumazet745e20f2010-09-29 13:23:09 -07003347 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
3348 goto recursion_alert;
3349
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003350 skb = validate_xmit_skb(skb, dev);
3351 if (!skb)
3352 goto drop;
3353
David S. Millerc773e842008-07-08 23:13:53 -07003354 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355
Tom Herbert734664982011-11-28 16:32:44 +00003356 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07003357 __this_cpu_inc(xmit_recursion);
David S. Millerce937182014-08-30 19:22:20 -07003358 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
Eric Dumazet745e20f2010-09-29 13:23:09 -07003359 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00003360 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07003361 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362 goto out;
3363 }
3364 }
David S. Millerc773e842008-07-08 23:13:53 -07003365 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00003366 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3367 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 } else {
3369 /* Recursion is detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07003370 * unfortunately.
3371 */
3372recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00003373 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3374 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375 }
3376 }
3377
3378 rc = -ENETDOWN;
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003379drop:
Herbert Xud4828d82006-06-22 02:28:18 -07003380 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381
Eric Dumazet015f0682014-03-27 08:45:56 -07003382 atomic_long_inc(&dev->tx_dropped);
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003383 kfree_skb_list(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 return rc;
3385out:
Herbert Xud4828d82006-06-22 02:28:18 -07003386 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387 return rc;
3388}
Jason Wangf663dd92014-01-10 16:18:26 +08003389
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05003390int dev_queue_xmit(struct sk_buff *skb)
Jason Wangf663dd92014-01-10 16:18:26 +08003391{
3392 return __dev_queue_xmit(skb, NULL);
3393}
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05003394EXPORT_SYMBOL(dev_queue_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395
Jason Wangf663dd92014-01-10 16:18:26 +08003396int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3397{
3398 return __dev_queue_xmit(skb, accel_priv);
3399}
3400EXPORT_SYMBOL(dev_queue_xmit_accel);
3401
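/*
 * Illustrative sketch: protocol code hands a fully built packet to the
 * device layer like this.  The skb is consumed regardless of the return
 * value, which may be a positive NET_XMIT code (see the comment above
 * __dev_queue_xmit()).
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);
 *	skb->priority = sk->sk_priority;
 *	err = dev_queue_xmit(skb);
 */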
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402
3403/*=======================================================================
3404 Receiver routines
3405 =======================================================================*/
3406
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003407int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00003408EXPORT_SYMBOL(netdev_max_backlog);
3409
Eric Dumazet3b098e22010-05-15 23:57:10 -07003410int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003411int netdev_budget __read_mostly = 300;
3412int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003414/* Called with irq disabled */
3415static inline void ____napi_schedule(struct softnet_data *sd,
3416 struct napi_struct *napi)
3417{
3418 list_add_tail(&napi->poll_list, &sd->poll_list);
3419 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3420}
3421
Eric Dumazetdf334542010-03-24 19:13:54 +00003422#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07003423
3424/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003425struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07003426EXPORT_SYMBOL(rps_sock_flow_table);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003427u32 rps_cpu_mask __read_mostly;
3428EXPORT_SYMBOL(rps_cpu_mask);
Tom Herbertfec5e652010-04-16 16:01:27 -07003429
Ingo Molnarc5905af2012-02-24 08:31:31 +01003430struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00003431
Ben Hutchingsc4454772011-01-19 11:03:53 +00003432static struct rps_dev_flow *
3433set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3434 struct rps_dev_flow *rflow, u16 next_cpu)
3435{
Eric Dumazeta31196b2015-04-25 09:35:24 -07003436 if (next_cpu < nr_cpu_ids) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00003437#ifdef CONFIG_RFS_ACCEL
3438 struct netdev_rx_queue *rxqueue;
3439 struct rps_dev_flow_table *flow_table;
3440 struct rps_dev_flow *old_rflow;
3441 u32 flow_id;
3442 u16 rxq_index;
3443 int rc;
3444
3445 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00003446 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3447 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00003448 goto out;
3449 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3450 if (rxq_index == skb_get_rx_queue(skb))
3451 goto out;
3452
3453 rxqueue = dev->_rx + rxq_index;
3454 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3455 if (!flow_table)
3456 goto out;
Tom Herbert61b905d2014-03-24 15:34:47 -07003457 flow_id = skb_get_hash(skb) & flow_table->mask;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003458 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3459 rxq_index, flow_id);
3460 if (rc < 0)
3461 goto out;
3462 old_rflow = rflow;
3463 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00003464 rflow->filter = rc;
3465 if (old_rflow->filter == rflow->filter)
3466 old_rflow->filter = RPS_NO_FILTER;
3467 out:
3468#endif
3469 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00003470 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003471 }
3472
Ben Hutchings09994d12011-10-03 04:42:46 +00003473 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003474 return rflow;
3475}
3476
Tom Herbert0a9627f2010-03-16 08:03:29 +00003477/*
3478 * get_rps_cpu is called from netif_receive_skb and returns the target
3479 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003480 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00003481 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003482static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3483 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003484{
Eric Dumazet567e4b72015-02-06 12:59:01 -08003485 const struct rps_sock_flow_table *sock_flow_table;
3486 struct netdev_rx_queue *rxqueue = dev->_rx;
Tom Herbertfec5e652010-04-16 16:01:27 -07003487 struct rps_dev_flow_table *flow_table;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003488 struct rps_map *map;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003489 int cpu = -1;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003490 u32 tcpu;
Tom Herbert61b905d2014-03-24 15:34:47 -07003491 u32 hash;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003492
Tom Herbert0a9627f2010-03-16 08:03:29 +00003493 if (skb_rx_queue_recorded(skb)) {
3494 u16 index = skb_get_rx_queue(skb);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003495
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003496 if (unlikely(index >= dev->real_num_rx_queues)) {
3497 WARN_ONCE(dev->real_num_rx_queues > 1,
3498 "%s received packet on queue %u, but number "
3499 "of RX queues is %u\n",
3500 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003501 goto done;
3502 }
Eric Dumazet567e4b72015-02-06 12:59:01 -08003503 rxqueue += index;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003504 }
3505
Eric Dumazet567e4b72015-02-06 12:59:01 -08003506 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3507
3508 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3509 map = rcu_dereference(rxqueue->rps_map);
3510 if (!flow_table && !map)
3511 goto done;
3512
Changli Gao2d47b452010-08-17 19:00:56 +00003513 skb_reset_network_header(skb);
Tom Herbert61b905d2014-03-24 15:34:47 -07003514 hash = skb_get_hash(skb);
3515 if (!hash)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003516 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003517
Tom Herbertfec5e652010-04-16 16:01:27 -07003518 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3519 if (flow_table && sock_flow_table) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003520 struct rps_dev_flow *rflow;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003521 u32 next_cpu;
3522 u32 ident;
Tom Herbertfec5e652010-04-16 16:01:27 -07003523
Eric Dumazet567e4b72015-02-06 12:59:01 -08003524 /* First check the global flow table for a match */
3525 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3526 if ((ident ^ hash) & ~rps_cpu_mask)
3527 goto try_rps;
3528
3529 next_cpu = ident & rps_cpu_mask;
3530
3531 /* OK, now we know there is a match,
3532 * we can look at the local (per receive queue) flow table
3533 */
Tom Herbert61b905d2014-03-24 15:34:47 -07003534 rflow = &flow_table->flows[hash & flow_table->mask];
Tom Herbertfec5e652010-04-16 16:01:27 -07003535 tcpu = rflow->cpu;
3536
Tom Herbertfec5e652010-04-16 16:01:27 -07003537 /*
3538 * If the desired CPU (where last recvmsg was done) is
3539 * different from current CPU (one in the rx-queue flow
3540 * table entry), switch if one of the following holds:
Eric Dumazeta31196b2015-04-25 09:35:24 -07003541 * - Current CPU is unset (>= nr_cpu_ids).
Tom Herbertfec5e652010-04-16 16:01:27 -07003542 * - Current CPU is offline.
3543 * - The current CPU's queue tail has advanced beyond the
3544 * last packet that was enqueued using this table entry.
3545 * This guarantees that all previous packets for the flow
3546 * have been dequeued, thus preserving in order delivery.
3547 */
3548 if (unlikely(tcpu != next_cpu) &&
Eric Dumazeta31196b2015-04-25 09:35:24 -07003549 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
Tom Herbertfec5e652010-04-16 16:01:27 -07003550 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003551 rflow->last_qtail)) >= 0)) {
3552 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003553 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003554 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003555
Eric Dumazeta31196b2015-04-25 09:35:24 -07003556 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003557 *rflowp = rflow;
3558 cpu = tcpu;
3559 goto done;
3560 }
3561 }
3562
Eric Dumazet567e4b72015-02-06 12:59:01 -08003563try_rps:
3564
Tom Herbert0a9627f2010-03-16 08:03:29 +00003565 if (map) {
Daniel Borkmann8fc54f62014-08-23 20:58:54 +02003566 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003567 if (cpu_online(tcpu)) {
3568 cpu = tcpu;
3569 goto done;
3570 }
3571 }
3572
3573done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003574 return cpu;
3575}
3576
Ben Hutchingsc4454772011-01-19 11:03:53 +00003577#ifdef CONFIG_RFS_ACCEL
3578
3579/**
3580 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3581 * @dev: Device on which the filter was set
3582 * @rxq_index: RX queue index
3583 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3584 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3585 *
3586 * Drivers that implement ndo_rx_flow_steer() should periodically call
3587 * this function for each installed filter and remove the filters for
3588 * which it returns %true.
3589 */
3590bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3591 u32 flow_id, u16 filter_id)
3592{
3593 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3594 struct rps_dev_flow_table *flow_table;
3595 struct rps_dev_flow *rflow;
3596 bool expire = true;
Eric Dumazeta31196b2015-04-25 09:35:24 -07003597 unsigned int cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003598
3599 rcu_read_lock();
3600 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3601 if (flow_table && flow_id <= flow_table->mask) {
3602 rflow = &flow_table->flows[flow_id];
3603 cpu = ACCESS_ONCE(rflow->cpu);
Eric Dumazeta31196b2015-04-25 09:35:24 -07003604 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
Ben Hutchingsc4454772011-01-19 11:03:53 +00003605 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3606 rflow->last_qtail) <
3607 (int)(10 * flow_table->mask)))
3608 expire = false;
3609 }
3610 rcu_read_unlock();
3611 return expire;
3612}
3613EXPORT_SYMBOL(rps_may_expire_flow);
3614
3615#endif /* CONFIG_RFS_ACCEL */
3616
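/*
 * Illustrative sketch: a driver implementing ndo_rx_flow_steer() would
 * periodically scan its installed filters and ask the core whether each
 * may be expired.  The foo_filter bookkeeping and foo_remove_filter()
 * are hypothetical.
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct foo_filter *f = &priv->filters[i];
 *
 *		if (f->installed &&
 *		    rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, i))
 *			foo_remove_filter(priv, f);
 *	}
 */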
Tom Herbert0a9627f2010-03-16 08:03:29 +00003617/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003618static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003619{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003620 struct softnet_data *sd = data;
3621
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003622 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003623 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003624}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003625
Tom Herbertfec5e652010-04-16 16:01:27 -07003626#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003627
3628/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003629 * Check if this softnet_data structure is another CPU's one.
 3630 * If yes, queue it to our IPI list and return 1;
 3631 * if no, return 0.
3632 */
3633static int rps_ipi_queued(struct softnet_data *sd)
3634{
3635#ifdef CONFIG_RPS
Christoph Lameter903ceff2014-08-17 12:30:35 -05003636 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003637
3638 if (sd != mysd) {
3639 sd->rps_ipi_next = mysd->rps_ipi_list;
3640 mysd->rps_ipi_list = sd;
3641
3642 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3643 return 1;
3644 }
3645#endif /* CONFIG_RPS */
3646 return 0;
3647}
3648
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003649#ifdef CONFIG_NET_FLOW_LIMIT
3650int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3651#endif
3652
3653static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3654{
3655#ifdef CONFIG_NET_FLOW_LIMIT
3656 struct sd_flow_limit *fl;
3657 struct softnet_data *sd;
3658 unsigned int old_flow, new_flow;
3659
3660 if (qlen < (netdev_max_backlog >> 1))
3661 return false;
3662
Christoph Lameter903ceff2014-08-17 12:30:35 -05003663 sd = this_cpu_ptr(&softnet_data);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003664
3665 rcu_read_lock();
3666 fl = rcu_dereference(sd->flow_limit);
3667 if (fl) {
Tom Herbert3958afa1b2013-12-15 22:12:06 -08003668 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003669 old_flow = fl->history[fl->history_head];
3670 fl->history[fl->history_head] = new_flow;
3671
3672 fl->history_head++;
3673 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3674
3675 if (likely(fl->buckets[old_flow]))
3676 fl->buckets[old_flow]--;
3677
3678 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3679 fl->count++;
3680 rcu_read_unlock();
3681 return true;
3682 }
3683 }
3684 rcu_read_unlock();
3685#endif
3686 return false;
3687}
3688
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003689/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003690 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3691 * queue (may be a remote CPU queue).
3692 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003693static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3694 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003695{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003696 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003697 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003698 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003699
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003700 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003701
3702 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003703
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003704 rps_lock(sd);
Julian Anastasove9e4dd32015-07-09 09:59:09 +03003705 if (!netif_running(skb->dev))
3706 goto drop;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003707 qlen = skb_queue_len(&sd->input_pkt_queue);
3708 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Li RongQinge008f3f2014-12-08 09:42:55 +08003709 if (qlen) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003710enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003711 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003712 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003713 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003714 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003715 return NET_RX_SUCCESS;
3716 }
3717
Eric Dumazetebda37c22010-05-06 23:51:21 +00003718 /* Schedule NAPI for the backlog device.
 3719 * We can use a non-atomic operation since we own the queue lock.
3720 */
3721 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003722 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003723 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003724 }
3725 goto enqueue;
3726 }
3727
Julian Anastasove9e4dd32015-07-09 09:59:09 +03003728drop:
Changli Gaodee42872010-05-02 05:42:16 +00003729 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003730 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003731
Tom Herbert0a9627f2010-03-16 08:03:29 +00003732 local_irq_restore(flags);
3733
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003734 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003735 kfree_skb(skb);
3736 return NET_RX_DROP;
3737}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003738
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003739static int netif_rx_internal(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003741 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742
Eric Dumazet588f0332011-11-15 04:12:55 +00003743 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744
Koki Sanagicf66ba52010-08-23 18:45:02 +09003745 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003746#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003747 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003748 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003749 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750
Changli Gaocece1942010-08-07 20:35:43 -07003751 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003752 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003753
3754 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003755 if (cpu < 0)
3756 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003757
3758 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3759
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003760 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003761 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003762 } else
3763#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003764 {
3765 unsigned int qtail;
3766 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3767 put_cpu();
3768 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003769 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003771
3772/**
3773 * netif_rx - post buffer to the network code
3774 * @skb: buffer to post
3775 *
3776 * This function receives a packet from a device driver and queues it for
3777 * the upper (protocol) levels to process. It always succeeds. The buffer
3778 * may be dropped during processing for congestion control or by the
3779 * protocol layers.
3780 *
3781 * return values:
3782 * NET_RX_SUCCESS (no congestion)
3783 * NET_RX_DROP (packet was dropped)
3784 *
3785 */
3786
3787int netif_rx(struct sk_buff *skb)
3788{
3789 trace_netif_rx_entry(skb);
3790
3791 return netif_rx_internal(skb);
3792}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003793EXPORT_SYMBOL(netif_rx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003794
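/*
 * Illustrative sketch: a non-NAPI driver's receive interrupt handing a
 * freshly built skb to the stack.  foo_build_skb() is a hypothetical
 * helper that copies the frame out of the hardware ring.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb = foo_build_skb(dev);
 *
 *		if (skb) {
 *			skb->protocol = eth_type_trans(skb, dev);
 *			netif_rx(skb);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */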
3795int netif_rx_ni(struct sk_buff *skb)
3796{
3797 int err;
3798
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003799 trace_netif_rx_ni_entry(skb);
3800
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801 preempt_disable();
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003802 err = netif_rx_internal(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803 if (local_softirq_pending())
3804 do_softirq();
3805 preempt_enable();
3806
3807 return err;
3808}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809EXPORT_SYMBOL(netif_rx_ni);
3810
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811static void net_tx_action(struct softirq_action *h)
3812{
Christoph Lameter903ceff2014-08-17 12:30:35 -05003813 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814
3815 if (sd->completion_queue) {
3816 struct sk_buff *clist;
3817
3818 local_irq_disable();
3819 clist = sd->completion_queue;
3820 sd->completion_queue = NULL;
3821 local_irq_enable();
3822
3823 while (clist) {
3824 struct sk_buff *skb = clist;
3825 clist = clist->next;
3826
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003827 WARN_ON(atomic_read(&skb->users));
Eric Dumazete6247022013-12-05 04:45:08 -08003828 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3829 trace_consume_skb(skb);
3830 else
3831 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832 __kfree_skb(skb);
3833 }
3834 }
3835
3836 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003837 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838
3839 local_irq_disable();
3840 head = sd->output_queue;
3841 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003842 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003843 local_irq_enable();
3844
3845 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003846 struct Qdisc *q = head;
3847 spinlock_t *root_lock;
3848
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849 head = head->next_sched;
3850
David S. Miller5fb66222008-08-02 20:02:43 -07003851 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003852 if (spin_trylock(root_lock)) {
Peter Zijlstra4e857c52014-03-17 18:06:10 +01003853 smp_mb__before_atomic();
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003854 clear_bit(__QDISC_STATE_SCHED,
3855 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003856 qdisc_run(q);
3857 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003859 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003860 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003861 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003862 } else {
Peter Zijlstra4e857c52014-03-17 18:06:10 +01003863 smp_mb__before_atomic();
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003864 clear_bit(__QDISC_STATE_SCHED,
3865 &q->state);
3866 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867 }
3868 }
3869 }
3870}
3871
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003872#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3873 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003874/* This hook is defined here for ATM LANE */
3875int (*br_fdb_test_addr_hook)(struct net_device *dev,
3876 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003877EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003878#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003880static inline struct sk_buff *
3881sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
3882 struct net_device *orig_dev)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003883{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02003884#ifdef CONFIG_NET_CLS_ACT
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003885 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3886 struct tcf_result cl_res;
Eric Dumazet24824a02010-10-02 06:11:55 +00003887
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003888	/* If there's at least one ingress qdisc present somewhere
3889	 * (so we got here via the enabled static key), devices that
3890	 * are not configured with an ingress qdisc will bail
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003891	 * out here.
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003892	 */
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003893 if (!cl)
Daniel Borkmann45771392015-04-10 23:07:54 +02003894 return skb;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003895 if (*pt_prev) {
3896 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3897 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003898 }
3899
Florian Westphal33654952015-05-14 00:36:28 +02003900 qdisc_skb_cb(skb)->pkt_len = skb->len;
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003901 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
Eric Dumazet24ea5912015-07-06 05:18:03 -07003902 qdisc_bstats_cpu_update(cl->q, skb);
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003903
Daniel Borkmann3b3ae882015-08-26 23:00:06 +02003904 switch (tc_classify(skb, cl, &cl_res, false)) {
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003905 case TC_ACT_OK:
3906 case TC_ACT_RECLASSIFY:
3907 skb->tc_index = TC_H_MIN(cl_res.classid);
3908 break;
3909 case TC_ACT_SHOT:
Eric Dumazet24ea5912015-07-06 05:18:03 -07003910 qdisc_qstats_cpu_drop(cl->q);
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003911 case TC_ACT_STOLEN:
3912 case TC_ACT_QUEUED:
3913 kfree_skb(skb);
3914 return NULL;
Alexei Starovoitov27b29f62015-09-15 23:05:43 -07003915 case TC_ACT_REDIRECT:
3916 /* skb_mac_header check was done by cls/act_bpf, so
3917 * we can safely push the L2 header back before
3918 * redirecting to another netdev
3919 */
3920 __skb_push(skb, skb->mac_len);
3921 skb_do_redirect(skb);
3922 return NULL;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003923 default:
3924 break;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003925 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02003926#endif /* CONFIG_NET_CLS_ACT */
Herbert Xuf697c3e2007-10-14 00:38:47 -07003927 return skb;
3928}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003929
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003930/**
3931 * netdev_rx_handler_register - register receive handler
3932 * @dev: device to register a handler for
3933 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003934 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003935 *
Masanari Iidae2278672014-02-18 22:54:36 +09003936 * Register a receive handler for a device. This handler will then be
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003937 * called from __netif_receive_skb. A negative errno code is returned
3938 * on failure.
3939 *
3940 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003941 *
3942 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003943 */
3944int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003945 rx_handler_func_t *rx_handler,
3946 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003947{
3948 ASSERT_RTNL();
3949
3950 if (dev->rx_handler)
3951 return -EBUSY;
3952
Eric Dumazet00cfec32013-03-29 03:01:22 +00003953 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00003954 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003955 rcu_assign_pointer(dev->rx_handler, rx_handler);
3956
3957 return 0;
3958}
3959EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
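
/* Illustrative sketch (not part of dev.c): a minimal rx_handler in the style
 * of bridge/bonding/macvlan. Everything prefixed "example_" is hypothetical;
 * the registration pattern and return codes follow the API above.
 */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct example_port *port = rcu_dereference(skb->dev->rx_handler_data);

	if (!example_port_claims(port, skb))
		return RX_HANDLER_PASS;		/* continue normal delivery */

	skb->dev = port->upper_dev;		/* steer to the upper device */
	*pskb = skb;
	return RX_HANDLER_ANOTHER;		/* rerun __netif_receive_skb_core */
}

static int example_port_attach(struct net_device *dev, struct example_port *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_rx_handler, port);
	rtnl_unlock();
	return err;
}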
3960
3961/**
3962 * netdev_rx_handler_unregister - unregister receive handler
3963 * @dev: device to unregister a handler from
3964 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00003965 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003966 *
3967 * The caller must hold the rtnl_mutex.
3968 */
3969void netdev_rx_handler_unregister(struct net_device *dev)
3970{
3971
3972 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003973 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00003974 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
3975 * section has a guarantee to see a non NULL rx_handler_data
3976 * as well.
3977 */
3978 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003979 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003980}
3981EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3982
Mel Gormanb4b9e352012-07-31 16:44:26 -07003983/*
3984 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3985 * the special handling of PFMEMALLOC skbs.
3986 */
3987static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3988{
3989 switch (skb->protocol) {
Joe Perches2b8837a2014-03-12 10:04:17 -07003990 case htons(ETH_P_ARP):
3991 case htons(ETH_P_IP):
3992 case htons(ETH_P_IPV6):
3993 case htons(ETH_P_8021Q):
3994 case htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07003995 return true;
3996 default:
3997 return false;
3998 }
3999}
4000
Pablo Neirae687ad62015-05-13 18:19:38 +02004001static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4002 int *ret, struct net_device *orig_dev)
4003{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004004#ifdef CONFIG_NETFILTER_INGRESS
Pablo Neirae687ad62015-05-13 18:19:38 +02004005 if (nf_hook_ingress_active(skb)) {
4006 if (*pt_prev) {
4007 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4008 *pt_prev = NULL;
4009 }
4010
4011 return nf_hook_ingress(skb);
4012 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004013#endif /* CONFIG_NETFILTER_INGRESS */
Pablo Neirae687ad62015-05-13 18:19:38 +02004014 return 0;
4015}
Pablo Neirae687ad62015-05-13 18:19:38 +02004016
David S. Miller9754e292013-02-14 15:57:38 -05004017static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018{
4019 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004020 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07004021 struct net_device *orig_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004022 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023 int ret = NET_RX_DROP;
Al Viro252e33462006-11-14 20:48:11 -08004024 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025
Eric Dumazet588f0332011-11-15 04:12:55 +00004026 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07004027
Koki Sanagicf66ba52010-08-23 18:45:02 +09004028 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08004029
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07004030 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00004031
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07004032 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00004033 if (!skb_transport_header_was_set(skb))
4034 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00004035 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036
4037 pt_prev = NULL;
4038
David S. Miller63d8ea72011-02-28 10:48:59 -08004039another_round:
David S. Millerb6858172012-07-23 16:27:54 -07004040 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08004041
4042 __this_cpu_inc(softnet_data.processed);
4043
Patrick McHardy8ad227f2013-04-19 02:04:31 +00004044 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4045 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04004046 skb = skb_vlan_untag(skb);
Jiri Pirkobcc6d472011-04-07 19:48:33 +00004047 if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03004048 goto out;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00004049 }
4050
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051#ifdef CONFIG_NET_CLS_ACT
4052 if (skb->tc_verd & TC_NCLS) {
4053 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
4054 goto ncls;
4055 }
4056#endif
4057
David S. Miller9754e292013-02-14 15:57:38 -05004058 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07004059 goto skip_taps;
4060
Linus Torvalds1da177e2005-04-16 15:20:36 -07004061 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Salam Noureddine7866a622015-01-27 11:35:48 -08004062 if (pt_prev)
4063 ret = deliver_skb(skb, pt_prev, orig_dev);
4064 pt_prev = ptype;
4065 }
4066
4067 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4068 if (pt_prev)
4069 ret = deliver_skb(skb, pt_prev, orig_dev);
4070 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071 }
4072
Mel Gormanb4b9e352012-07-31 16:44:26 -07004073skip_taps:
Pablo Neira1cf519002015-05-13 18:19:37 +02004074#ifdef CONFIG_NET_INGRESS
Daniel Borkmann45771392015-04-10 23:07:54 +02004075 if (static_key_false(&ingress_needed)) {
Daniel Borkmann1f211a12016-01-07 22:29:47 +01004076 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
Daniel Borkmann45771392015-04-10 23:07:54 +02004077 if (!skb)
Julian Anastasov2c17d272015-07-09 09:59:10 +03004078 goto out;
Pablo Neirae687ad62015-05-13 18:19:38 +02004079
4080 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
Julian Anastasov2c17d272015-07-09 09:59:10 +03004081 goto out;
Daniel Borkmann45771392015-04-10 23:07:54 +02004082 }
Pablo Neira1cf519002015-05-13 18:19:37 +02004083#endif
4084#ifdef CONFIG_NET_CLS_ACT
Daniel Borkmann45771392015-04-10 23:07:54 +02004085 skb->tc_verd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004086ncls:
4087#endif
David S. Miller9754e292013-02-14 15:57:38 -05004088 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07004089 goto drop;
4090
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004091 if (skb_vlan_tag_present(skb)) {
John Fastabend24257172011-10-10 09:16:41 +00004092 if (pt_prev) {
4093 ret = deliver_skb(skb, pt_prev, orig_dev);
4094 pt_prev = NULL;
4095 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004096 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00004097 goto another_round;
4098 else if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03004099 goto out;
John Fastabend24257172011-10-10 09:16:41 +00004100 }
4101
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004102 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004103 if (rx_handler) {
4104 if (pt_prev) {
4105 ret = deliver_skb(skb, pt_prev, orig_dev);
4106 pt_prev = NULL;
4107 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004108 switch (rx_handler(&skb)) {
4109 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00004110 ret = NET_RX_SUCCESS;
Julian Anastasov2c17d272015-07-09 09:59:10 +03004111 goto out;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004112 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08004113 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004114 case RX_HANDLER_EXACT:
4115 deliver_exact = true;
4116 case RX_HANDLER_PASS:
4117 break;
4118 default:
4119 BUG();
4120 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004121 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004122
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004123 if (unlikely(skb_vlan_tag_present(skb))) {
4124 if (skb_vlan_tag_get_id(skb))
Eric Dumazetd4b812d2013-07-18 07:19:26 -07004125 skb->pkt_type = PACKET_OTHERHOST;
4126 /* Note: we might in the future use prio bits
4127	 * and set skb->priority like in vlan_do_receive().
4128	 * For the time being, just ignore the Priority Code Point.
4129 */
4130 skb->vlan_tci = 0;
4131 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004132
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133 type = skb->protocol;
Salam Noureddine7866a622015-01-27 11:35:48 -08004134
4135 /* deliver only exact match when indicated */
4136 if (likely(!deliver_exact)) {
4137 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4138 &ptype_base[ntohs(type) &
4139 PTYPE_HASH_MASK]);
4140 }
4141
4142 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4143 &orig_dev->ptype_specific);
4144
4145 if (unlikely(skb->dev != orig_dev)) {
4146 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4147 &skb->dev->ptype_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004148 }
4149
4150 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00004151 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00004152 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00004153 else
4154 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07004156drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00004157 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004158 kfree_skb(skb);
4159	/* Jamal, now you will not be able to escape explaining
4160	 * to me how you were going to use this. :-)
4161 */
4162 ret = NET_RX_DROP;
4163 }
4164
Julian Anastasov2c17d272015-07-09 09:59:10 +03004165out:
David S. Miller9754e292013-02-14 15:57:38 -05004166 return ret;
4167}
4168
4169static int __netif_receive_skb(struct sk_buff *skb)
4170{
4171 int ret;
4172
4173 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
4174 unsigned long pflags = current->flags;
4175
4176 /*
4177 * PFMEMALLOC skbs are special, they should
4178 * - be delivered to SOCK_MEMALLOC sockets only
4179 * - stay away from userspace
4180 * - have bounded memory usage
4181 *
4182 * Use PF_MEMALLOC as this saves us from propagating the allocation
4183 * context down to all allocation sites.
4184 */
4185 current->flags |= PF_MEMALLOC;
4186 ret = __netif_receive_skb_core(skb, true);
4187 tsk_restore_flags(current, pflags, PF_MEMALLOC);
4188 } else
4189 ret = __netif_receive_skb_core(skb, false);
4190
Linus Torvalds1da177e2005-04-16 15:20:36 -07004191 return ret;
4192}
Tom Herbert0a9627f2010-03-16 08:03:29 +00004193
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004194static int netif_receive_skb_internal(struct sk_buff *skb)
Tom Herbert0a9627f2010-03-16 08:03:29 +00004195{
Julian Anastasov2c17d272015-07-09 09:59:10 +03004196 int ret;
4197
Eric Dumazet588f0332011-11-15 04:12:55 +00004198 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07004199
Richard Cochranc1f19b52010-07-17 08:49:36 +00004200 if (skb_defer_rx_timestamp(skb))
4201 return NET_RX_SUCCESS;
4202
Julian Anastasov2c17d272015-07-09 09:59:10 +03004203 rcu_read_lock();
4204
Eric Dumazetdf334542010-03-24 19:13:54 +00004205#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01004206 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07004207 struct rps_dev_flow voidflow, *rflow = &voidflow;
Julian Anastasov2c17d272015-07-09 09:59:10 +03004208 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07004209
Eric Dumazet3b098e22010-05-15 23:57:10 -07004210 if (cpu >= 0) {
4211 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4212 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00004213 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07004214 }
Tom Herbertfec5e652010-04-16 16:01:27 -07004215 }
Tom Herbert1e94d722010-03-18 17:45:44 -07004216#endif
Julian Anastasov2c17d272015-07-09 09:59:10 +03004217 ret = __netif_receive_skb(skb);
4218 rcu_read_unlock();
4219 return ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004220}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004221
4222/**
4223 * netif_receive_skb - process receive buffer from network
4224 * @skb: buffer to process
4225 *
4226 * netif_receive_skb() is the main receive data processing function.
4227 * It always succeeds. The buffer may be dropped during processing
4228 * for congestion control or by the protocol layers.
4229 *
4230 * This function may only be called from softirq context and interrupts
4231 * should be enabled.
4232 *
4233 * Return values (usually ignored):
4234 * NET_RX_SUCCESS: no congestion
4235 * NET_RX_DROP: packet was dropped
4236 */
Eric W. Biederman04eb4482015-09-15 20:04:15 -05004237int netif_receive_skb(struct sk_buff *skb)
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004238{
4239 trace_netif_receive_skb_entry(skb);
4240
4241 return netif_receive_skb_internal(skb);
4242}
Eric W. Biederman04eb4482015-09-15 20:04:15 -05004243EXPORT_SYMBOL(netif_receive_skb);
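
/* Illustrative sketch (not part of dev.c): handing one received frame to the
 * stack from softirq context, as required by the comment above. "example_"
 * names and the flat receive buffer are hypothetical.
 */
static void example_deliver_frame(struct net_device *dev,
				  const void *buf, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (unlikely(!skb)) {
		dev->stats.rx_dropped++;
		return;
	}
	memcpy(skb_put(skb, len), buf, len);
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	netif_receive_skb(skb);
}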
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244
Eric Dumazet88751272010-04-19 05:07:33 +00004245/* Network device is going away, flush any packets still pending
4246 * Called with irqs disabled.
4247 */
Changli Gao152102c2010-03-30 20:16:22 +00004248static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004249{
Changli Gao152102c2010-03-30 20:16:22 +00004250 struct net_device *dev = arg;
Christoph Lameter903ceff2014-08-17 12:30:35 -05004251 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004252 struct sk_buff *skb, *tmp;
4253
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004254 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004255 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004256 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004257 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004258 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004259 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004260 }
Changli Gao6e7676c2010-04-27 15:07:33 -07004261 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004262 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004263
4264 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
4265 if (skb->dev == dev) {
4266 __skb_unlink(skb, &sd->process_queue);
4267 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004268 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004269 }
4270 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004271}
4272
Herbert Xud565b0a2008-12-15 23:38:52 -08004273static int napi_gro_complete(struct sk_buff *skb)
4274{
Vlad Yasevich22061d82012-11-15 08:49:11 +00004275 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004276 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004277 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08004278 int err = -ENOENT;
4279
Eric Dumazetc3c7c252012-12-06 13:54:59 +00004280 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4281
Herbert Xufc59f9a2009-04-14 15:11:06 -07004282 if (NAPI_GRO_CB(skb)->count == 1) {
4283 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004284 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07004285 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004286
4287 rcu_read_lock();
4288 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004289 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08004290 continue;
4291
Jerry Chu299603e82013-12-11 20:53:45 -08004292 err = ptype->callbacks.gro_complete(skb, 0);
Herbert Xud565b0a2008-12-15 23:38:52 -08004293 break;
4294 }
4295 rcu_read_unlock();
4296
4297 if (err) {
4298 WARN_ON(&ptype->list == head);
4299 kfree_skb(skb);
4300 return NET_RX_SUCCESS;
4301 }
4302
4303out:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004304 return netif_receive_skb_internal(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004305}
4306
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004307/* napi->gro_list contains packets ordered by age.
4308 * The youngest packets are at its head.
4309 * Complete skbs in reverse order to reduce latencies.
4310 */
4311void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08004312{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004313 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004314
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004315 /* scan list and build reverse chain */
4316 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4317 skb->prev = prev;
4318 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08004319 }
4320
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004321 for (skb = prev; skb; skb = prev) {
4322 skb->next = NULL;
4323
4324 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4325 return;
4326
4327 prev = skb->prev;
4328 napi_gro_complete(skb);
4329 napi->gro_count--;
4330 }
4331
Herbert Xud565b0a2008-12-15 23:38:52 -08004332 napi->gro_list = NULL;
4333}
Eric Dumazet86cac582010-08-31 18:25:32 +00004334EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08004335
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004336static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4337{
4338 struct sk_buff *p;
4339 unsigned int maclen = skb->dev->hard_header_len;
Tom Herbert0b4cec82014-01-15 08:58:06 -08004340 u32 hash = skb_get_hash_raw(skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004341
4342 for (p = napi->gro_list; p; p = p->next) {
4343 unsigned long diffs;
4344
Tom Herbert0b4cec82014-01-15 08:58:06 -08004345 NAPI_GRO_CB(p)->flush = 0;
4346
4347 if (hash != skb_get_hash_raw(p)) {
4348 NAPI_GRO_CB(p)->same_flow = 0;
4349 continue;
4350 }
4351
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004352 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4353 diffs |= p->vlan_tci ^ skb->vlan_tci;
Jesse Grossce87fc62016-01-20 17:59:49 -08004354 diffs |= skb_metadata_dst_cmp(p, skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004355 if (maclen == ETH_HLEN)
4356 diffs |= compare_ether_header(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07004357 skb_mac_header(skb));
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004358 else if (!diffs)
4359 diffs = memcmp(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07004360 skb_mac_header(skb),
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004361 maclen);
4362 NAPI_GRO_CB(p)->same_flow = !diffs;
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004363 }
4364}
4365
Jerry Chu299603e82013-12-11 20:53:45 -08004366static void skb_gro_reset_offset(struct sk_buff *skb)
4367{
4368 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4369 const skb_frag_t *frag0 = &pinfo->frags[0];
4370
4371 NAPI_GRO_CB(skb)->data_offset = 0;
4372 NAPI_GRO_CB(skb)->frag0 = NULL;
4373 NAPI_GRO_CB(skb)->frag0_len = 0;
4374
4375 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4376 pinfo->nr_frags &&
4377 !PageHighMem(skb_frag_page(frag0))) {
4378 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4379 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xud565b0a2008-12-15 23:38:52 -08004380 }
4381}
4382
Eric Dumazeta50e2332014-03-29 21:28:21 -07004383static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4384{
4385 struct skb_shared_info *pinfo = skb_shinfo(skb);
4386
4387 BUG_ON(skb->end - skb->tail < grow);
4388
4389 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4390
4391 skb->data_len -= grow;
4392 skb->tail += grow;
4393
4394 pinfo->frags[0].page_offset += grow;
4395 skb_frag_size_sub(&pinfo->frags[0], grow);
4396
4397 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4398 skb_frag_unref(skb, 0);
4399 memmove(pinfo->frags, pinfo->frags + 1,
4400 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4401 }
4402}
4403
Rami Rosenbb728822012-11-28 21:55:25 +00004404static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08004405{
4406 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004407 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004408 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004409 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004410 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004411 enum gro_result ret;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004412 int grow;
Herbert Xud565b0a2008-12-15 23:38:52 -08004413
Eric W. Biederman9c62a682014-03-14 20:51:52 -07004414 if (!(skb->dev->features & NETIF_F_GRO))
Herbert Xud565b0a2008-12-15 23:38:52 -08004415 goto normal;
4416
Tom Herbert5a212322014-08-31 15:12:41 -07004417 if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
Herbert Xuf17f5c92009-01-14 14:36:12 -08004418 goto normal;
4419
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004420 gro_list_prepare(napi, skb);
4421
Herbert Xud565b0a2008-12-15 23:38:52 -08004422 rcu_read_lock();
4423 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004424 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08004425 continue;
4426
Herbert Xu86911732009-01-29 14:19:50 +00004427 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00004428 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004429 NAPI_GRO_CB(skb)->same_flow = 0;
4430 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08004431 NAPI_GRO_CB(skb)->free = 0;
Or Gerlitzb582ef02014-01-20 13:59:19 +02004432 NAPI_GRO_CB(skb)->udp_mark = 0;
Tom Herbert15e23962015-02-10 16:30:31 -08004433 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004434
Tom Herbert662880f2014-08-27 21:26:56 -07004435 /* Setup for GRO checksum validation */
4436 switch (skb->ip_summed) {
4437 case CHECKSUM_COMPLETE:
4438 NAPI_GRO_CB(skb)->csum = skb->csum;
4439 NAPI_GRO_CB(skb)->csum_valid = 1;
4440 NAPI_GRO_CB(skb)->csum_cnt = 0;
4441 break;
4442 case CHECKSUM_UNNECESSARY:
4443 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4444 NAPI_GRO_CB(skb)->csum_valid = 0;
4445 break;
4446 default:
4447 NAPI_GRO_CB(skb)->csum_cnt = 0;
4448 NAPI_GRO_CB(skb)->csum_valid = 0;
4449 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004450
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004451 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004452 break;
4453 }
4454 rcu_read_unlock();
4455
4456 if (&ptype->list == head)
4457 goto normal;
4458
Herbert Xu0da2afd52008-12-26 14:57:42 -08004459 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004460 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004461
Herbert Xud565b0a2008-12-15 23:38:52 -08004462 if (pp) {
4463 struct sk_buff *nskb = *pp;
4464
4465 *pp = nskb->next;
4466 nskb->next = NULL;
4467 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00004468 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08004469 }
4470
Herbert Xu0da2afd52008-12-26 14:57:42 -08004471 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08004472 goto ok;
4473
Eric Dumazet600adc12014-01-09 14:12:19 -08004474 if (NAPI_GRO_CB(skb)->flush)
Herbert Xud565b0a2008-12-15 23:38:52 -08004475 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08004476
Eric Dumazet600adc12014-01-09 14:12:19 -08004477 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4478 struct sk_buff *nskb = napi->gro_list;
4479
4480 /* locate the end of the list to select the 'oldest' flow */
4481 while (nskb->next) {
4482 pp = &nskb->next;
4483 nskb = *pp;
4484 }
4485 *pp = NULL;
4486 nskb->next = NULL;
4487 napi_gro_complete(nskb);
4488 } else {
4489 napi->gro_count++;
4490 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004491 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004492 NAPI_GRO_CB(skb)->age = jiffies;
Eric Dumazet29e98242014-05-16 11:34:37 -07004493 NAPI_GRO_CB(skb)->last = skb;
Herbert Xu86911732009-01-29 14:19:50 +00004494 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004495 skb->next = napi->gro_list;
4496 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004497 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08004498
Herbert Xuad0f9902009-02-01 01:24:55 -08004499pull:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004500 grow = skb_gro_offset(skb) - skb_headlen(skb);
4501 if (grow > 0)
4502 gro_pull_from_frag0(skb, grow);
Herbert Xud565b0a2008-12-15 23:38:52 -08004503ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004504 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08004505
4506normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08004507 ret = GRO_NORMAL;
4508 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08004509}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004510
Jerry Chubf5a7552014-01-07 10:23:19 -08004511struct packet_offload *gro_find_receive_by_type(__be16 type)
4512{
4513 struct list_head *offload_head = &offload_base;
4514 struct packet_offload *ptype;
4515
4516 list_for_each_entry_rcu(ptype, offload_head, list) {
4517 if (ptype->type != type || !ptype->callbacks.gro_receive)
4518 continue;
4519 return ptype;
4520 }
4521 return NULL;
4522}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004523EXPORT_SYMBOL(gro_find_receive_by_type);
Jerry Chubf5a7552014-01-07 10:23:19 -08004524
4525struct packet_offload *gro_find_complete_by_type(__be16 type)
4526{
4527 struct list_head *offload_head = &offload_base;
4528 struct packet_offload *ptype;
4529
4530 list_for_each_entry_rcu(ptype, offload_head, list) {
4531 if (ptype->type != type || !ptype->callbacks.gro_complete)
4532 continue;
4533 return ptype;
4534 }
4535 return NULL;
4536}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004537EXPORT_SYMBOL(gro_find_complete_by_type);
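
/* Illustrative sketch (not part of dev.c): protocols plug into the GRO engine
 * by registering a struct packet_offload with dev_add_offload(); see
 * net/ipv4/af_inet.c for a real instance. The callback bodies here are
 * hypothetical and only their signatures are taken from the API above.
 */
static struct sk_buff **example_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb);
static int example_gro_complete(struct sk_buff *skb, int nhoff);

static struct packet_offload example_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),	/* assumption: IPv4, for illustration */
	.callbacks = {
		.gro_receive  = example_gro_receive,
		.gro_complete = example_gro_complete,
	},
};

static int __init example_offload_init(void)
{
	dev_add_offload(&example_offload);
	return 0;
}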
Herbert Xu96e93ea2009-01-06 10:49:34 -08004538
Rami Rosenbb728822012-11-28 21:55:25 +00004539static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08004540{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004541 switch (ret) {
4542 case GRO_NORMAL:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004543 if (netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004544 ret = GRO_DROP;
4545 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004546
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004547 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08004548 kfree_skb(skb);
4549 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004550
Eric Dumazetdaa86542012-04-19 07:07:40 +00004551 case GRO_MERGED_FREE:
Jesse Grossce87fc62016-01-20 17:59:49 -08004552 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
4553 skb_dst_drop(skb);
Eric Dumazetd7e88832012-04-30 08:10:34 +00004554 kmem_cache_free(skbuff_head_cache, skb);
Jesse Grossce87fc62016-01-20 17:59:49 -08004555 } else {
Eric Dumazetd7e88832012-04-30 08:10:34 +00004556 __kfree_skb(skb);
Jesse Grossce87fc62016-01-20 17:59:49 -08004557 }
Eric Dumazetdaa86542012-04-19 07:07:40 +00004558 break;
4559
Ben Hutchings5b252f02009-10-29 07:17:09 +00004560 case GRO_HELD:
4561 case GRO_MERGED:
4562 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004563 }
4564
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004565 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004566}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004567
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004568gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004569{
Eric Dumazet93f93a42015-11-18 06:30:59 -08004570 skb_mark_napi_id(skb, napi);
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004571 trace_napi_gro_receive_entry(skb);
Herbert Xu86911732009-01-29 14:19:50 +00004572
Eric Dumazeta50e2332014-03-29 21:28:21 -07004573 skb_gro_reset_offset(skb);
4574
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004575 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004576}
4577EXPORT_SYMBOL(napi_gro_receive);
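
/* Illustrative sketch (not part of dev.c): inside a NAPI ->poll() callback a
 * driver hands frames to GRO instead of calling netif_receive_skb() directly.
 * "example_" names are hypothetical.
 */
static void example_rx_skb(struct example_priv *priv, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, priv->ndev);
	napi_gro_receive(&priv->napi, skb);	/* may merge, hold or deliver */
}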
4578
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00004579static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004580{
Eric Dumazet93a35f52014-10-23 06:30:30 -07004581 if (unlikely(skb->pfmemalloc)) {
4582 consume_skb(skb);
4583 return;
4584 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004585 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00004586 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4587 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00004588 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08004589 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08004590 skb->skb_iif = 0;
Jerry Chuc3caf112014-07-14 15:54:46 -07004591 skb->encapsulation = 0;
4592 skb_shinfo(skb)->gso_type = 0;
Eric Dumazete33d0ba2014-04-03 09:28:10 -07004593 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08004594
4595 napi->skb = skb;
4596}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004597
Herbert Xu76620aa2009-04-16 02:02:07 -07004598struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08004599{
Herbert Xu5d38a072009-01-04 16:13:40 -08004600 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004601
4602 if (!skb) {
Alexander Duyckfd11a832014-12-09 19:40:49 -08004603 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
Eric Dumazete2f9dc32015-11-19 12:11:23 -08004604 if (skb) {
4605 napi->skb = skb;
4606 skb_mark_napi_id(skb, napi);
4607 }
Herbert Xu5d38a072009-01-04 16:13:40 -08004608 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004609 return skb;
4610}
Herbert Xu76620aa2009-04-16 02:02:07 -07004611EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004612
Eric Dumazeta50e2332014-03-29 21:28:21 -07004613static gro_result_t napi_frags_finish(struct napi_struct *napi,
4614 struct sk_buff *skb,
4615 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004616{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004617 switch (ret) {
4618 case GRO_NORMAL:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004619 case GRO_HELD:
4620 __skb_push(skb, ETH_HLEN);
4621 skb->protocol = eth_type_trans(skb, skb->dev);
4622 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004623 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00004624 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004625
4626 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004627 case GRO_MERGED_FREE:
4628 napi_reuse_skb(napi, skb);
4629 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004630
4631 case GRO_MERGED:
4632 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004633 }
4634
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004635 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004636}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004637
Eric Dumazeta50e2332014-03-29 21:28:21 -07004638/* The upper GRO stack assumes the network header starts at gro_offset=0.
4639 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
4640 * we copy the Ethernet header into skb->data to have a common layout.
4641 */
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004642static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004643{
Herbert Xu76620aa2009-04-16 02:02:07 -07004644 struct sk_buff *skb = napi->skb;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004645 const struct ethhdr *eth;
4646 unsigned int hlen = sizeof(*eth);
Herbert Xu76620aa2009-04-16 02:02:07 -07004647
4648 napi->skb = NULL;
4649
Eric Dumazeta50e2332014-03-29 21:28:21 -07004650 skb_reset_mac_header(skb);
4651 skb_gro_reset_offset(skb);
4652
4653 eth = skb_gro_header_fast(skb, 0);
4654 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4655 eth = skb_gro_header_slow(skb, hlen, 0);
4656 if (unlikely(!eth)) {
4657 napi_reuse_skb(napi, skb);
4658 return NULL;
4659 }
4660 } else {
4661 gro_pull_from_frag0(skb, hlen);
4662 NAPI_GRO_CB(skb)->frag0 += hlen;
4663 NAPI_GRO_CB(skb)->frag0_len -= hlen;
Herbert Xu76620aa2009-04-16 02:02:07 -07004664 }
Eric Dumazeta50e2332014-03-29 21:28:21 -07004665 __skb_pull(skb, hlen);
4666
4667 /*
4668 * This works because the only protocols we care about don't require
4669 * special handling.
4670 * We'll fix it up properly in napi_frags_finish()
4671 */
4672 skb->protocol = eth->h_proto;
Herbert Xu76620aa2009-04-16 02:02:07 -07004673
Herbert Xu76620aa2009-04-16 02:02:07 -07004674 return skb;
4675}
Herbert Xu76620aa2009-04-16 02:02:07 -07004676
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004677gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004678{
4679 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004680
4681 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004682 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004683
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004684 trace_napi_gro_frags_entry(skb);
4685
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004686 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004687}
4688EXPORT_SYMBOL(napi_gro_frags);
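
/* Illustrative sketch (not part of dev.c): the frag-based GRO entry point for
 * drivers that receive directly into pages. "example_" names are
 * hypothetical; PAGE_SIZE as truesize assumes one frame per page.
 */
static void example_rx_page(struct napi_struct *napi, struct page *page,
			    unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return;		/* a real driver would recycle the page */

	skb_add_rx_frag(skb, 0, page, offset, len, PAGE_SIZE);
	napi_gro_frags(napi);	/* pulls the Ethernet header via frag0 */
}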
4689
Tom Herbert573e8fc2014-08-22 13:33:47 -07004690/* Compute the checksum from gro_offset and return the folded value
4691 * after adding in any pseudo checksum.
4692 */
4693__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4694{
4695 __wsum wsum;
4696 __sum16 sum;
4697
4698 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4699
4700 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4701 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4702 if (likely(!sum)) {
4703 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4704 !skb->csum_complete_sw)
4705 netdev_rx_csum_fault(skb->dev);
4706 }
4707
4708 NAPI_GRO_CB(skb)->csum = wsum;
4709 NAPI_GRO_CB(skb)->csum_valid = 1;
4710
4711 return sum;
4712}
4713EXPORT_SYMBOL(__skb_gro_checksum_complete);
4714
Eric Dumazete326bed2010-04-22 00:22:45 -07004715/*
Zhi Yong Wu855abcf2014-01-01 04:34:50 +08004716 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
Eric Dumazete326bed2010-04-22 00:22:45 -07004717 * Note: called with local irq disabled, but exits with local irq enabled.
4718 */
4719static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4720{
4721#ifdef CONFIG_RPS
4722 struct softnet_data *remsd = sd->rps_ipi_list;
4723
4724 if (remsd) {
4725 sd->rps_ipi_list = NULL;
4726
4727 local_irq_enable();
4728
4729	/* Send pending IPIs to kick RPS processing on remote CPUs. */
4730 while (remsd) {
4731 struct softnet_data *next = remsd->rps_ipi_next;
4732
4733 if (cpu_online(remsd->cpu))
Frederic Weisbeckerc46fff22014-02-24 16:40:02 +01004734 smp_call_function_single_async(remsd->cpu,
Frederic Weisbeckerfce8ad12014-02-24 16:40:01 +01004735 &remsd->csd);
Eric Dumazete326bed2010-04-22 00:22:45 -07004736 remsd = next;
4737 }
4738 } else
4739#endif
4740 local_irq_enable();
4741}
4742
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004743static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4744{
4745#ifdef CONFIG_RPS
4746 return sd->rps_ipi_list != NULL;
4747#else
4748 return false;
4749#endif
4750}
4751
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004752static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004753{
4754 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004755 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004756
Eric Dumazete326bed2010-04-22 00:22:45 -07004757	/* Check if we have pending IPIs; it's better to send them now
4758	 * rather than waiting for net_rx_action() to end.
4759 */
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004760 if (sd_has_rps_ipi_waiting(sd)) {
Eric Dumazete326bed2010-04-22 00:22:45 -07004761 local_irq_disable();
4762 net_rps_action_and_irq_enable(sd);
4763 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004764
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004765 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004766 local_irq_disable();
Tom Herbert11ef7a82014-06-30 09:50:40 -07004767 while (1) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004768 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004769
Changli Gao6e7676c2010-04-27 15:07:33 -07004770 while ((skb = __skb_dequeue(&sd->process_queue))) {
Julian Anastasov2c17d272015-07-09 09:59:10 +03004771 rcu_read_lock();
Eric Dumazete4008272010-04-05 15:42:39 -07004772 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004773 __netif_receive_skb(skb);
Julian Anastasov2c17d272015-07-09 09:59:10 +03004774 rcu_read_unlock();
Changli Gao6e7676c2010-04-27 15:07:33 -07004775 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004776 input_queue_head_incr(sd);
4777 if (++work >= quota) {
4778 local_irq_enable();
4779 return work;
4780 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004781 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004782
Changli Gao6e7676c2010-04-27 15:07:33 -07004783 rps_lock(sd);
Tom Herbert11ef7a82014-06-30 09:50:40 -07004784 if (skb_queue_empty(&sd->input_pkt_queue)) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004785 /*
4786 * Inline a custom version of __napi_complete().
4787	 * Only the current cpu owns and manipulates this napi,
Tom Herbert11ef7a82014-06-30 09:50:40 -07004788 * and NAPI_STATE_SCHED is the only possible flag set
4789 * on backlog.
4790 * We can use a plain write instead of clear_bit(),
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004791	 * and we don't need an smp_mb() memory barrier.
4792 */
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004793 napi->state = 0;
Tom Herbert11ef7a82014-06-30 09:50:40 -07004794 rps_unlock(sd);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004795
Tom Herbert11ef7a82014-06-30 09:50:40 -07004796 break;
Changli Gao6e7676c2010-04-27 15:07:33 -07004797 }
Tom Herbert11ef7a82014-06-30 09:50:40 -07004798
4799 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4800 &sd->process_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07004801 rps_unlock(sd);
4802 }
4803 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004805 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004806}
4807
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004808/**
4809 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004810 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004811 *
Eric Dumazetbc9ad162014-10-28 18:05:13 -07004812 * The entry's receive function will be scheduled to run.
4813 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004814 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004815void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004816{
4817 unsigned long flags;
4818
4819 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05004820 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004821 local_irq_restore(flags);
4822}
4823EXPORT_SYMBOL(__napi_schedule);
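
/* Illustrative sketch (not part of dev.c): the canonical interrupt-handler
 * pairing for __napi_schedule(). Since hard irqs are masked inside an ISR,
 * __napi_schedule_irqoff() below would also do. "example_" names are
 * hypothetical.
 */
static irqreturn_t example_isr(int irq, void *data)
{
	struct example_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		example_mask_rx_irq(priv);	/* hypothetical: quiesce device */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}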
4824
Eric Dumazetbc9ad162014-10-28 18:05:13 -07004825/**
4826 * __napi_schedule_irqoff - schedule for receive
4827 * @n: entry to schedule
4828 *
4829 * Variant of __napi_schedule() assuming hard irqs are masked
4830 */
4831void __napi_schedule_irqoff(struct napi_struct *n)
4832{
4833 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4834}
4835EXPORT_SYMBOL(__napi_schedule_irqoff);
4836
Herbert Xud565b0a2008-12-15 23:38:52 -08004837void __napi_complete(struct napi_struct *n)
4838{
4839 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
Herbert Xud565b0a2008-12-15 23:38:52 -08004840
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004841 list_del_init(&n->poll_list);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01004842 smp_mb__before_atomic();
Herbert Xud565b0a2008-12-15 23:38:52 -08004843 clear_bit(NAPI_STATE_SCHED, &n->state);
4844}
4845EXPORT_SYMBOL(__napi_complete);
4846
Eric Dumazet3b47d302014-11-06 21:09:44 -08004847void napi_complete_done(struct napi_struct *n, int work_done)
Herbert Xud565b0a2008-12-15 23:38:52 -08004848{
4849 unsigned long flags;
4850
4851 /*
4852 * don't let napi dequeue from the cpu poll list
4853	 * just in case it's running on a different cpu.
4854 */
4855 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4856 return;
4857
Eric Dumazet3b47d302014-11-06 21:09:44 -08004858 if (n->gro_list) {
4859 unsigned long timeout = 0;
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004860
Eric Dumazet3b47d302014-11-06 21:09:44 -08004861 if (work_done)
4862 timeout = n->dev->gro_flush_timeout;
4863
4864 if (timeout)
4865 hrtimer_start(&n->timer, ns_to_ktime(timeout),
4866 HRTIMER_MODE_REL_PINNED);
4867 else
4868 napi_gro_flush(n, false);
4869 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004870 if (likely(list_empty(&n->poll_list))) {
4871 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4872 } else {
4873 /* If n->poll_list is not empty, we need to mask irqs */
4874 local_irq_save(flags);
4875 __napi_complete(n);
4876 local_irq_restore(flags);
4877 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004878}
Eric Dumazet3b47d302014-11-06 21:09:44 -08004879EXPORT_SYMBOL(napi_complete_done);
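
/* Illustrative sketch (not part of dev.c): a driver may only complete NAPI
 * when it consumed less than the full budget; napi_complete_done() then
 * flushes (or timer-defers) the GRO lists as above. "example_" names are
 * hypothetical.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work = example_clean_rx(priv, budget);	/* hypothetical RX loop */

	if (work < budget) {
		napi_complete_done(napi, work);
		example_unmask_rx_irq(priv);	/* hypothetical: rearm device */
	}
	return work;
}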
Herbert Xud565b0a2008-12-15 23:38:52 -08004880
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004881/* must be called under rcu_read_lock(), as we dont take a reference */
Eric Dumazet02d62e82015-11-18 06:30:52 -08004882static struct napi_struct *napi_by_id(unsigned int napi_id)
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004883{
4884 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4885 struct napi_struct *napi;
4886
4887 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4888 if (napi->napi_id == napi_id)
4889 return napi;
4890
4891 return NULL;
4892}
Eric Dumazet02d62e82015-11-18 06:30:52 -08004893
4894#if defined(CONFIG_NET_RX_BUSY_POLL)
Eric Dumazetce6aea92015-11-18 06:30:54 -08004895#define BUSY_POLL_BUDGET 8
Eric Dumazet02d62e82015-11-18 06:30:52 -08004896bool sk_busy_loop(struct sock *sk, int nonblock)
4897{
4898 unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
Eric Dumazetce6aea92015-11-18 06:30:54 -08004899 int (*busy_poll)(struct napi_struct *dev);
Eric Dumazet02d62e82015-11-18 06:30:52 -08004900 struct napi_struct *napi;
4901 int rc = false;
4902
Eric Dumazet2a028ec2015-11-18 06:30:53 -08004903 rcu_read_lock();
Eric Dumazet02d62e82015-11-18 06:30:52 -08004904
4905 napi = napi_by_id(sk->sk_napi_id);
4906 if (!napi)
4907 goto out;
4908
Eric Dumazetce6aea92015-11-18 06:30:54 -08004909 /* Note: ndo_busy_poll method is optional in linux-4.5 */
4910 busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
Eric Dumazet02d62e82015-11-18 06:30:52 -08004911
4912 do {
Eric Dumazetce6aea92015-11-18 06:30:54 -08004913 rc = 0;
Eric Dumazet2a028ec2015-11-18 06:30:53 -08004914 local_bh_disable();
Eric Dumazetce6aea92015-11-18 06:30:54 -08004915 if (busy_poll) {
4916 rc = busy_poll(napi);
4917 } else if (napi_schedule_prep(napi)) {
4918 void *have = netpoll_poll_lock(napi);
4919
4920 if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
4921 rc = napi->poll(napi, BUSY_POLL_BUDGET);
4922 trace_napi_poll(napi);
4923 if (rc == BUSY_POLL_BUDGET) {
4924 napi_complete_done(napi, rc);
4925 napi_schedule(napi);
4926 }
4927 }
4928 netpoll_poll_unlock(have);
4929 }
Eric Dumazet2a028ec2015-11-18 06:30:53 -08004930 if (rc > 0)
4931 NET_ADD_STATS_BH(sock_net(sk),
4932 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
4933 local_bh_enable();
Eric Dumazet02d62e82015-11-18 06:30:52 -08004934
4935 if (rc == LL_FLUSH_FAILED)
4936 break; /* permanent failure */
4937
Eric Dumazet02d62e82015-11-18 06:30:52 -08004938 cpu_relax();
Eric Dumazet02d62e82015-11-18 06:30:52 -08004939 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
4940 !need_resched() && !busy_loop_timeout(end_time));
4941
4942 rc = !skb_queue_empty(&sk->sk_receive_queue);
4943out:
Eric Dumazet2a028ec2015-11-18 06:30:53 -08004944 rcu_read_unlock();
Eric Dumazet02d62e82015-11-18 06:30:52 -08004945 return rc;
4946}
4947EXPORT_SYMBOL(sk_busy_loop);
4948
4949#endif /* CONFIG_NET_RX_BUSY_POLL */
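
/* Illustrative sketch (not part of dev.c): sk_busy_loop() above is reached
 * once an application opts a socket into busy polling, e.g. with
 * SO_BUSY_POLL (or globally via the net.core.busy_read sysctl). Userspace C,
 * assuming a 50us spin budget; kept under #if 0 since it is not kernel code.
 */
#if 0
#include <sys/socket.h>

static int example_enable_busy_poll(int fd)
{
	unsigned int usecs = 50;

	return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
			  &usecs, sizeof(usecs));
}
#endif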
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004950
4951void napi_hash_add(struct napi_struct *napi)
4952{
Eric Dumazetd64b5e82015-11-18 06:31:00 -08004953 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
4954 test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
Eric Dumazet52bd2d62015-11-18 06:30:50 -08004955 return;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004956
Eric Dumazet52bd2d62015-11-18 06:30:50 -08004957 spin_lock(&napi_hash_lock);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004958
Eric Dumazet52bd2d62015-11-18 06:30:50 -08004959 /* 0..NR_CPUS+1 range is reserved for sender_cpu use */
4960 do {
4961 if (unlikely(++napi_gen_id < NR_CPUS + 1))
4962 napi_gen_id = NR_CPUS + 1;
4963 } while (napi_by_id(napi_gen_id));
4964 napi->napi_id = napi_gen_id;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004965
Eric Dumazet52bd2d62015-11-18 06:30:50 -08004966 hlist_add_head_rcu(&napi->napi_hash_node,
4967 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004968
Eric Dumazet52bd2d62015-11-18 06:30:50 -08004969 spin_unlock(&napi_hash_lock);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004970}
4971EXPORT_SYMBOL_GPL(napi_hash_add);
4972
4973/* Warning: the caller is responsible for making sure an RCU grace period
4974 * has elapsed before freeing the memory containing @napi.
4975 */
Eric Dumazet34cbe272015-11-18 06:31:02 -08004976bool napi_hash_del(struct napi_struct *napi)
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004977{
Eric Dumazet34cbe272015-11-18 06:31:02 -08004978 bool rcu_sync_needed = false;
4979
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004980 spin_lock(&napi_hash_lock);
4981
Eric Dumazet34cbe272015-11-18 06:31:02 -08004982 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
4983 rcu_sync_needed = true;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004984 hlist_del_rcu(&napi->napi_hash_node);
Eric Dumazet34cbe272015-11-18 06:31:02 -08004985 }
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004986 spin_unlock(&napi_hash_lock);
Eric Dumazet34cbe272015-11-18 06:31:02 -08004987 return rcu_sync_needed;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004988}
4989EXPORT_SYMBOL_GPL(napi_hash_del);
4990
Eric Dumazet3b47d302014-11-06 21:09:44 -08004991static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4992{
4993 struct napi_struct *napi;
4994
4995 napi = container_of(timer, struct napi_struct, timer);
4996 if (napi->gro_list)
4997 napi_schedule(napi);
4998
4999 return HRTIMER_NORESTART;
5000}
5001
Herbert Xud565b0a2008-12-15 23:38:52 -08005002void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
5003 int (*poll)(struct napi_struct *, int), int weight)
5004{
5005 INIT_LIST_HEAD(&napi->poll_list);
Eric Dumazet3b47d302014-11-06 21:09:44 -08005006 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5007 napi->timer.function = napi_watchdog;
Herbert Xu4ae55442009-02-08 18:00:36 +00005008 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08005009 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08005010 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08005011 napi->poll = poll;
Eric Dumazet82dc3c62013-03-05 15:57:22 +00005012 if (weight > NAPI_POLL_WEIGHT)
5013 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
5014 weight, dev->name);
Herbert Xud565b0a2008-12-15 23:38:52 -08005015 napi->weight = weight;
5016 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08005017 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08005018#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08005019 spin_lock_init(&napi->poll_lock);
5020 napi->poll_owner = -1;
5021#endif
5022 set_bit(NAPI_STATE_SCHED, &napi->state);
Eric Dumazet93d05d42015-11-18 06:31:03 -08005023 napi_hash_add(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08005024}
5025EXPORT_SYMBOL(netif_napi_add);
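
/* Illustrative sketch (not part of dev.c): a driver registers its NAPI
 * context at probe time and enables it in ndo_open before unmasking RX
 * interrupts. NAPI_POLL_WEIGHT is the recommended weight; "example_" names
 * are hypothetical.
 */
static void example_setup_napi(struct example_priv *priv)
{
	netif_napi_add(priv->ndev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);
}

static int example_open(struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);

	napi_enable(&priv->napi);
	example_unmask_rx_irq(priv);	/* hypothetical */
	return 0;
}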
5026
Eric Dumazet3b47d302014-11-06 21:09:44 -08005027void napi_disable(struct napi_struct *n)
5028{
5029 might_sleep();
5030 set_bit(NAPI_STATE_DISABLE, &n->state);
5031
5032 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
5033 msleep(1);
Neil Horman2d8bff1262015-09-23 14:57:58 -04005034 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
5035 msleep(1);
Eric Dumazet3b47d302014-11-06 21:09:44 -08005036
5037 hrtimer_cancel(&n->timer);
5038
5039 clear_bit(NAPI_STATE_DISABLE, &n->state);
5040}
5041EXPORT_SYMBOL(napi_disable);
5042
Eric Dumazet93d05d42015-11-18 06:31:03 -08005043/* Must be called in process context */
Herbert Xud565b0a2008-12-15 23:38:52 -08005044void netif_napi_del(struct napi_struct *napi)
5045{
Eric Dumazet93d05d42015-11-18 06:31:03 -08005046 might_sleep();
5047 if (napi_hash_del(napi))
5048 synchronize_net();
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08005049 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07005050 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08005051
Eric Dumazet289dccb2013-12-20 14:29:08 -08005052 kfree_skb_list(napi->gro_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08005053 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005054 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08005055}
5056EXPORT_SYMBOL(netif_napi_del);
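
/* Illustrative sketch (not part of dev.c): teardown mirrors setup. Collapsed
 * into one hypothetical function here; real drivers call napi_disable() from
 * ndo_stop and netif_napi_del() from their remove path.
 */
static void example_teardown_napi(struct example_priv *priv)
{
	example_mask_rx_irq(priv);	/* hypothetical: stop the interrupt source */
	napi_disable(&priv->napi);	/* waits for any in-flight poll */
	netif_napi_del(&priv->napi);	/* may sleep; unhashes and frees GRO state */
}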
5057
Herbert Xu726ce702014-12-21 07:16:21 +11005058static int napi_poll(struct napi_struct *n, struct list_head *repoll)
5059{
5060 void *have;
5061 int work, weight;
5062
5063 list_del_init(&n->poll_list);
5064
5065 have = netpoll_poll_lock(n);
5066
5067 weight = n->weight;
5068
5069 /* This NAPI_STATE_SCHED test is for avoiding a race
5070 * with netpoll's poll_napi(). Only the entity which
5071 * obtains the lock and sees NAPI_STATE_SCHED set will
5072 * actually make the ->poll() call. Therefore we avoid
5073 * accidentally calling ->poll() when NAPI is not scheduled.
5074 */
5075 work = 0;
5076 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
5077 work = n->poll(n, weight);
5078 trace_napi_poll(n);
5079 }
5080
5081 WARN_ON_ONCE(work > weight);
5082
5083 if (likely(work < weight))
5084 goto out_unlock;
5085
5086 /* Drivers must not modify the NAPI state if they
5087 * consume the entire weight. In such cases this code
5088 * still "owns" the NAPI instance and therefore can
5089 * move the instance around on the list at-will.
5090 */
5091 if (unlikely(napi_disable_pending(n))) {
5092 napi_complete(n);
5093 goto out_unlock;
5094 }
5095
5096 if (n->gro_list) {
5097 /* flush too old packets
5098 * If HZ < 1000, flush all packets.
5099 */
5100 napi_gro_flush(n, HZ >= 1000);
5101 }
5102
Herbert Xu001ce542014-12-21 07:16:22 +11005103 /* Some drivers may have called napi_schedule
5104 * prior to exhausting their budget.
5105 */
5106 if (unlikely(!list_empty(&n->poll_list))) {
5107 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
5108 n->dev ? n->dev->name : "backlog");
5109 goto out_unlock;
5110 }
5111
Herbert Xu726ce702014-12-21 07:16:21 +11005112 list_add_tail(&n->poll_list, repoll);
5113
5114out_unlock:
5115 netpoll_poll_unlock(have);
5116
5117 return work;
5118}
5119
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120static void net_rx_action(struct softirq_action *h)
5121{
Christoph Lameter903ceff2014-08-17 12:30:35 -05005122 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08005123 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07005124 int budget = netdev_budget;
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005125 LIST_HEAD(list);
5126 LIST_HEAD(repoll);
Matt Mackall53fb95d2005-08-11 19:27:43 -07005127
Linus Torvalds1da177e2005-04-16 15:20:36 -07005128 local_irq_disable();
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005129 list_splice_init(&sd->poll_list, &list);
5130 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005131
Herbert Xuceb8d5b2014-12-21 07:16:25 +11005132 for (;;) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005133 struct napi_struct *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005134
Herbert Xuceb8d5b2014-12-21 07:16:25 +11005135 if (list_empty(&list)) {
5136 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
5137 return;
5138 break;
5139 }
5140
Herbert Xu6bd373e2014-12-21 07:16:24 +11005141 n = list_first_entry(&list, struct napi_struct, poll_list);
5142 budget -= napi_poll(n, &repoll);
5143
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005144 /* If softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08005145 * Allow this to run for 2 jiffies, which allows
5146 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005147 */
Herbert Xuceb8d5b2014-12-21 07:16:25 +11005148 if (unlikely(budget <= 0 ||
5149 time_after_eq(jiffies, time_limit))) {
5150 sd->time_squeeze++;
5151 break;
5152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005153 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005154
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005155 local_irq_disable();
5156
5157 list_splice_tail_init(&sd->poll_list, &list);
5158 list_splice_tail(&repoll, &list);
5159 list_splice(&list, &sd->poll_list);
5160 if (!list_empty(&sd->poll_list))
5161 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
5162
Eric Dumazete326bed2010-04-22 00:22:45 -07005163 net_rps_action_and_irq_enable(sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164}
5165
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02005166struct netdev_adjacent {
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005167 struct net_device *dev;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005168
5169 /* upper master flag, there can only be one master device per list */
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005170 bool master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005171
Veaceslav Falico5d261912013-08-28 23:25:05 +02005172 /* counter for the number of times this device was added to us */
5173 u16 ref_nr;
5174
Veaceslav Falico402dae92013-09-25 09:20:09 +02005175 /* private field for the users */
5176 void *private;
5177
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005178 struct list_head list;
5179 struct rcu_head rcu;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005180};
5181
Michal Kubeček6ea29da2015-09-24 10:59:05 +02005182static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005183 struct list_head *adj_list)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005184{
Veaceslav Falico5d261912013-08-28 23:25:05 +02005185 struct netdev_adjacent *adj;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005186
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005187 list_for_each_entry(adj, adj_list, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005188 if (adj->dev == adj_dev)
5189 return adj;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005190 }
5191 return NULL;
5192}
5193
5194/**
5195 * netdev_has_upper_dev - Check if device is linked to an upper device
5196 * @dev: device
5197 * @upper_dev: upper device to check
5198 *
5199 * Find out if a device is linked to specified upper device and return true
5200 * in case it is. Note that this checks the whole chain of upper devices,
5201 * not only the immediate upper device. The caller must hold the RTNL lock.
5202 */
5203bool netdev_has_upper_dev(struct net_device *dev,
5204 struct net_device *upper_dev)
5205{
5206 ASSERT_RTNL();
5207
Michal Kubeček6ea29da2015-09-24 10:59:05 +02005208 return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005209}
5210EXPORT_SYMBOL(netdev_has_upper_dev);
5211
5212/**
5213 * netdev_has_any_upper_dev - Check if device is linked to some device
5214 * @dev: device
5215 *
5216 * Find out if a device is linked to an upper device and return true in case
5217 * it is. The caller must hold the RTNL lock.
5218 */
stephen hemminger1d143d92013-12-29 14:01:29 -08005219static bool netdev_has_any_upper_dev(struct net_device *dev)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005220{
5221 ASSERT_RTNL();
5222
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005223 return !list_empty(&dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005224}
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005225
5226/**
5227 * netdev_master_upper_dev_get - Get master upper device
5228 * @dev: device
5229 *
5230 * Find a master upper device and return pointer to it or NULL in case
5231 * it's not there. The caller must hold the RTNL lock.
5232 */
5233struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
5234{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02005235 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005236
5237 ASSERT_RTNL();
5238
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005239 if (list_empty(&dev->adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005240 return NULL;
5241
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005242 upper = list_first_entry(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02005243 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005244 if (likely(upper->master))
5245 return upper->dev;
5246 return NULL;
5247}
5248EXPORT_SYMBOL(netdev_master_upper_dev_get);
5249
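/*
 * Illustrative sketch, not from the original source: how a lower device
 * might query its stacking state under RTNL with the helpers above.
 * "port" and "candidate" are example names.
 */
#if 0
static void example_report_master(struct net_device *port,
				  struct net_device *candidate)
{
	struct net_device *master;

	ASSERT_RTNL();
	master = netdev_master_upper_dev_get(port);
	if (master)
		netdev_info(port, "enslaved to %s\n", master->name);
	else if (netdev_has_upper_dev(port, candidate))
		netdev_info(port, "linked under %s without a master\n",
			    candidate->name);
}
#endif
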
Veaceslav Falicob6ccba42013-09-25 09:20:23 +02005250void *netdev_adjacent_get_private(struct list_head *adj_list)
5251{
5252 struct netdev_adjacent *adj;
5253
5254 adj = list_entry(adj_list, struct netdev_adjacent, list);
5255
5256 return adj->private;
5257}
5258EXPORT_SYMBOL(netdev_adjacent_get_private);
5259
Veaceslav Falico31088a12013-09-25 09:20:12 +02005260/**
Vlad Yasevich44a40852014-05-16 17:20:38 -04005261 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
5262 * @dev: device
5263 * @iter: list_head ** of the current position
5264 *
5265 * Gets the next device from the dev's upper list, starting from iter
5266 * position. The caller must hold RCU read lock.
5267 */
5268struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5269 struct list_head **iter)
5270{
5271 struct netdev_adjacent *upper;
5272
5273 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5274
5275 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5276
5277 if (&upper->list == &dev->adj_list.upper)
5278 return NULL;
5279
5280 *iter = &upper->list;
5281
5282 return upper->dev;
5283}
5284EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
5285
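/*
 * Illustrative sketch, not from the original source: walking the
 * immediate upper devices under the RCU read lock with the iterator
 * above.  The netdev_for_each_upper_dev_rcu() macro in netdevice.h is
 * believed to wrap this same pattern.
 */
#if 0
static void example_walk_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter = &dev->adj_list.upper;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
		netdev_info(dev, "upper: %s\n", upper->name);
	rcu_read_unlock();
}
#endif
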
5286/**
Veaceslav Falico31088a12013-09-25 09:20:12 +02005287 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
Veaceslav Falico48311f42013-08-28 23:25:07 +02005288 * @dev: device
5289 * @iter: list_head ** of the current position
5290 *
5291 * Gets the next device from the dev's upper list, starting from iter
5292 * position. The caller must hold RCU read lock.
5293 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005294struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
5295 struct list_head **iter)
Veaceslav Falico48311f42013-08-28 23:25:07 +02005296{
5297 struct netdev_adjacent *upper;
5298
John Fastabend85328242013-11-26 06:33:52 +00005299 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
Veaceslav Falico48311f42013-08-28 23:25:07 +02005300
5301 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5302
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005303 if (&upper->list == &dev->all_adj_list.upper)
Veaceslav Falico48311f42013-08-28 23:25:07 +02005304 return NULL;
5305
5306 *iter = &upper->list;
5307
5308 return upper->dev;
5309}
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005310EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
Veaceslav Falico48311f42013-08-28 23:25:07 +02005311
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005312/**
Veaceslav Falico31088a12013-09-25 09:20:12 +02005313 * netdev_lower_get_next_private - Get the next ->private from the
5314 * lower neighbour list
5315 * @dev: device
5316 * @iter: list_head ** of the current position
5317 *
5318 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5319 * list, starting from iter position. The caller must either hold the
5320 * RTNL lock or its own locking that guarantees that the neighbour lower
subashab@codeaurora.orgb4691392015-07-24 03:03:29 +00005321 * list will remain unchanged.
Veaceslav Falico31088a12013-09-25 09:20:12 +02005322 */
5323void *netdev_lower_get_next_private(struct net_device *dev,
5324 struct list_head **iter)
5325{
5326 struct netdev_adjacent *lower;
5327
5328 lower = list_entry(*iter, struct netdev_adjacent, list);
5329
5330 if (&lower->list == &dev->adj_list.lower)
5331 return NULL;
5332
Veaceslav Falico6859e7d2014-04-07 11:25:12 +02005333 *iter = lower->list.next;
Veaceslav Falico31088a12013-09-25 09:20:12 +02005334
5335 return lower->private;
5336}
5337EXPORT_SYMBOL(netdev_lower_get_next_private);
5338
5339/**
5340 * netdev_lower_get_next_private_rcu - Get the next ->private from the
5341 * lower neighbour list, RCU
5342 * variant
5343 * @dev: device
5344 * @iter: list_head ** of the current position
5345 *
5346 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5347 * list, starting from iter position. The caller must hold RCU read lock.
5348 */
5349void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5350 struct list_head **iter)
5351{
5352 struct netdev_adjacent *lower;
5353
5354 WARN_ON_ONCE(!rcu_read_lock_held());
5355
5356 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5357
5358 if (&lower->list == &dev->adj_list.lower)
5359 return NULL;
5360
Veaceslav Falico6859e7d2014-04-07 11:25:12 +02005361 *iter = &lower->list;
Veaceslav Falico31088a12013-09-25 09:20:12 +02005362
5363 return lower->private;
5364}
5365EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5366
5367/**
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04005368 * netdev_lower_get_next - Get the next device from the lower neighbour
5369 * list
5370 * @dev: device
5371 * @iter: list_head ** of the current position
5372 *
5373 * Gets the next device from the dev's lower neighbour
5374 * list, starting from iter position. The caller must hold the RTNL lock or
5375 * its own locking that guarantees that the neighbour lower
subashab@codeaurora.orgb4691392015-07-24 03:03:29 +00005376 * list will remain unchanged.
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04005377 */
5378void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5379{
5380 struct netdev_adjacent *lower;
5381
5382 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
5383
5384 if (&lower->list == &dev->adj_list.lower)
5385 return NULL;
5386
5387 *iter = &lower->list;
5388
5389 return lower->dev;
5390}
5391EXPORT_SYMBOL(netdev_lower_get_next);
5392
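/*
 * Illustrative sketch, not from the original source: the RTNL-protected
 * walk over lower devices built on netdev_lower_get_next() above, via
 * the netdev_for_each_lower_dev() macro that dev_get_nest_level()
 * further down in this file also uses.
 */
#if 0
static void example_walk_lowers(struct net_device *dev)
{
	struct net_device *lower;
	struct list_head *iter;

	ASSERT_RTNL();
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_info(dev, "lower: %s\n", lower->name);
}
#endif
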
5393/**
dingtianhonge001bfa2013-12-13 10:19:55 +08005394 * netdev_lower_get_first_private_rcu - Get the first ->private from the
5395 * lower neighbour list, RCU
5396 * variant
5397 * @dev: device
5398 *
5399 * Gets the first netdev_adjacent->private from the dev's lower neighbour
5400 * list. The caller must hold RCU read lock.
5401 */
5402void *netdev_lower_get_first_private_rcu(struct net_device *dev)
5403{
5404 struct netdev_adjacent *lower;
5405
5406 lower = list_first_or_null_rcu(&dev->adj_list.lower,
5407 struct netdev_adjacent, list);
5408 if (lower)
5409 return lower->private;
5410 return NULL;
5411}
5412EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
5413
5414/**
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005415 * netdev_master_upper_dev_get_rcu - Get master upper device
5416 * @dev: device
5417 *
5418 * Find a master upper device and return pointer to it or NULL in case
5419 * it's not there. The caller must hold the RCU read lock.
5420 */
5421struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
5422{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02005423 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005424
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005425 upper = list_first_or_null_rcu(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02005426 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005427 if (upper && likely(upper->master))
5428 return upper->dev;
5429 return NULL;
5430}
5431EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
5432
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05305433static int netdev_adjacent_sysfs_add(struct net_device *dev,
Veaceslav Falico3ee32702014-01-14 21:58:50 +01005434 struct net_device *adj_dev,
5435 struct list_head *dev_list)
5436{
5437 char linkname[IFNAMSIZ+7];
5438 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5439 "upper_%s" : "lower_%s", adj_dev->name);
5440 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
5441 linkname);
5442}
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05305443static void netdev_adjacent_sysfs_del(struct net_device *dev,
Veaceslav Falico3ee32702014-01-14 21:58:50 +01005444 char *name,
5445 struct list_head *dev_list)
5446{
5447 char linkname[IFNAMSIZ+7];
5448 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5449 "upper_%s" : "lower_%s", name);
5450 sysfs_remove_link(&(dev->dev.kobj), linkname);
5451}
5452
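/*
 * The helpers above materialise each adjacency in sysfs as a symlink
 * named "upper_<ifname>" or "lower_<ifname>" under /sys/class/net/<dev>/
 * (a master device is additionally linked as "master" by the insert
 * path below).  The IFNAMSIZ+7 buffer covers the six-character prefix
 * plus the NUL-terminated interface name.
 */
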
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04005453static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
5454 struct net_device *adj_dev,
5455 struct list_head *dev_list)
5456{
5457 return (dev_list == &dev->adj_list.upper ||
5458 dev_list == &dev->adj_list.lower) &&
5459 net_eq(dev_net(dev), dev_net(adj_dev));
5460}
Veaceslav Falico3ee32702014-01-14 21:58:50 +01005461
Veaceslav Falico5d261912013-08-28 23:25:05 +02005462static int __netdev_adjacent_dev_insert(struct net_device *dev,
5463 struct net_device *adj_dev,
Veaceslav Falico7863c052013-09-25 09:20:06 +02005464 struct list_head *dev_list,
Veaceslav Falico402dae92013-09-25 09:20:09 +02005465 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005466{
5467 struct netdev_adjacent *adj;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005468 int ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005469
Michal Kubeček6ea29da2015-09-24 10:59:05 +02005470 adj = __netdev_find_adj(adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005471
5472 if (adj) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005473 adj->ref_nr++;
5474 return 0;
5475 }
5476
5477 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5478 if (!adj)
5479 return -ENOMEM;
5480
5481 adj->dev = adj_dev;
5482 adj->master = master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005483 adj->ref_nr = 1;
Veaceslav Falico402dae92013-09-25 09:20:09 +02005484 adj->private = private;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005485 dev_hold(adj_dev);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005486
5487 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5488 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005489
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04005490 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
Veaceslav Falico3ee32702014-01-14 21:58:50 +01005491 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
Veaceslav Falico5831d662013-09-25 09:20:32 +02005492 if (ret)
5493 goto free_adj;
5494 }
5495
Veaceslav Falico7863c052013-09-25 09:20:06 +02005496 /* Ensure that master link is always the first item in list. */
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005497 if (master) {
5498 ret = sysfs_create_link(&(dev->dev.kobj),
5499 &(adj_dev->dev.kobj), "master");
5500 if (ret)
Veaceslav Falico5831d662013-09-25 09:20:32 +02005501 goto remove_symlinks;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005502
Veaceslav Falico7863c052013-09-25 09:20:06 +02005503 list_add_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005504 } else {
Veaceslav Falico7863c052013-09-25 09:20:06 +02005505 list_add_tail_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005506 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02005507
5508 return 0;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005509
Veaceslav Falico5831d662013-09-25 09:20:32 +02005510remove_symlinks:
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04005511 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
Veaceslav Falico3ee32702014-01-14 21:58:50 +01005512 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005513free_adj:
5514 kfree(adj);
Nikolay Aleksandrov974daef2013-10-23 15:28:56 +02005515 dev_put(adj_dev);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005516
5517 return ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005518}
5519
stephen hemminger1d143d92013-12-29 14:01:29 -08005520static void __netdev_adjacent_dev_remove(struct net_device *dev,
5521 struct net_device *adj_dev,
5522 struct list_head *dev_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005523{
5524 struct netdev_adjacent *adj;
5525
Michal Kubeček6ea29da2015-09-24 10:59:05 +02005526 adj = __netdev_find_adj(adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005527
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005528 if (!adj) {
5529		pr_err("tried to remove device %s from %s's adjacency list\n",
5530		       adj_dev->name, dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005531 BUG();
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005532 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02005533
5534 if (adj->ref_nr > 1) {
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005535 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5536 adj->ref_nr-1);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005537 adj->ref_nr--;
5538 return;
5539 }
5540
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005541 if (adj->master)
5542 sysfs_remove_link(&(dev->dev.kobj), "master");
5543
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04005544 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
Veaceslav Falico3ee32702014-01-14 21:58:50 +01005545 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
Veaceslav Falico5831d662013-09-25 09:20:32 +02005546
Veaceslav Falico5d261912013-08-28 23:25:05 +02005547 list_del_rcu(&adj->list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005548 pr_debug("dev_put for %s, because link removed from %s to %s\n",
5549 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005550 dev_put(adj_dev);
5551 kfree_rcu(adj, rcu);
5552}
5553
stephen hemminger1d143d92013-12-29 14:01:29 -08005554static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5555 struct net_device *upper_dev,
5556 struct list_head *up_list,
5557 struct list_head *down_list,
5558 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005559{
5560 int ret;
5561
Veaceslav Falico402dae92013-09-25 09:20:09 +02005562 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5563 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005564 if (ret)
5565 return ret;
5566
Veaceslav Falico402dae92013-09-25 09:20:09 +02005567 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5568 false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005569 if (ret) {
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005570 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005571 return ret;
5572 }
5573
5574 return 0;
5575}
5576
stephen hemminger1d143d92013-12-29 14:01:29 -08005577static int __netdev_adjacent_dev_link(struct net_device *dev,
5578 struct net_device *upper_dev)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005579{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005580 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5581 &dev->all_adj_list.upper,
5582 &upper_dev->all_adj_list.lower,
Veaceslav Falico402dae92013-09-25 09:20:09 +02005583 NULL, false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005584}
5585
stephen hemminger1d143d92013-12-29 14:01:29 -08005586static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5587 struct net_device *upper_dev,
5588 struct list_head *up_list,
5589 struct list_head *down_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005590{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005591 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5592 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005593}
5594
stephen hemminger1d143d92013-12-29 14:01:29 -08005595static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5596 struct net_device *upper_dev)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005597{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005598 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5599 &dev->all_adj_list.upper,
5600 &upper_dev->all_adj_list.lower);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005601}
5602
stephen hemminger1d143d92013-12-29 14:01:29 -08005603static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5604 struct net_device *upper_dev,
5605 void *private, bool master)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005606{
5607 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5608
5609 if (ret)
5610 return ret;
5611
5612 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5613 &dev->adj_list.upper,
5614 &upper_dev->adj_list.lower,
Veaceslav Falico402dae92013-09-25 09:20:09 +02005615 private, master);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005616 if (ret) {
5617 __netdev_adjacent_dev_unlink(dev, upper_dev);
5618 return ret;
5619 }
5620
5621 return 0;
5622}
5623
stephen hemminger1d143d92013-12-29 14:01:29 -08005624static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5625 struct net_device *upper_dev)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005626{
5627 __netdev_adjacent_dev_unlink(dev, upper_dev);
5628 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5629 &dev->adj_list.upper,
5630 &upper_dev->adj_list.lower);
5631}
Veaceslav Falico5d261912013-08-28 23:25:05 +02005632
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005633static int __netdev_upper_dev_link(struct net_device *dev,
Veaceslav Falico402dae92013-09-25 09:20:09 +02005634 struct net_device *upper_dev, bool master,
Jiri Pirko29bf24a2015-12-03 12:12:11 +01005635 void *upper_priv, void *upper_info)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005636{
Jiri Pirko0e4ead92015-08-27 09:31:18 +02005637 struct netdev_notifier_changeupper_info changeupper_info;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005638 struct netdev_adjacent *i, *j, *to_i, *to_j;
5639 int ret = 0;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005640
5641 ASSERT_RTNL();
5642
5643 if (dev == upper_dev)
5644 return -EBUSY;
5645
5646 /* To prevent loops, check if dev is not upper device to upper_dev. */
Michal Kubeček6ea29da2015-09-24 10:59:05 +02005647 if (__netdev_find_adj(dev, &upper_dev->all_adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005648 return -EBUSY;
5649
Michal Kubeček6ea29da2015-09-24 10:59:05 +02005650 if (__netdev_find_adj(upper_dev, &dev->adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005651 return -EEXIST;
5652
5653 if (master && netdev_master_upper_dev_get(dev))
5654 return -EBUSY;
5655
Jiri Pirko0e4ead92015-08-27 09:31:18 +02005656 changeupper_info.upper_dev = upper_dev;
5657 changeupper_info.master = master;
5658 changeupper_info.linking = true;
Jiri Pirko29bf24a2015-12-03 12:12:11 +01005659 changeupper_info.upper_info = upper_info;
Jiri Pirko0e4ead92015-08-27 09:31:18 +02005660
Jiri Pirko573c7ba2015-10-16 14:01:22 +02005661 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
5662 &changeupper_info.info);
5663 ret = notifier_to_errno(ret);
5664 if (ret)
5665 return ret;
5666
Jiri Pirko6dffb042015-12-03 12:12:10 +01005667 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
Veaceslav Falico402dae92013-09-25 09:20:09 +02005668 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005669 if (ret)
5670 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005671
Veaceslav Falico5d261912013-08-28 23:25:05 +02005672 /* Now that we linked these devs, make all the upper_dev's
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005673	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
Veaceslav Falico5d261912013-08-28 23:25:05 +02005674	 * vice versa, and don't forget the devices themselves. All of these
5675 * links are non-neighbours.
5676 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005677 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5678 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5679 pr_debug("Interlinking %s with %s, non-neighbour\n",
5680 i->dev->name, j->dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005681 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5682 if (ret)
5683 goto rollback_mesh;
5684 }
5685 }
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005686
Veaceslav Falico5d261912013-08-28 23:25:05 +02005687 /* add dev to every upper_dev's upper device */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005688 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5689 pr_debug("linking %s's upper device %s with %s\n",
5690 upper_dev->name, i->dev->name, dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005691 ret = __netdev_adjacent_dev_link(dev, i->dev);
5692 if (ret)
5693 goto rollback_upper_mesh;
5694 }
5695
5696 /* add upper_dev to every dev's lower device */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005697 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5698 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5699 i->dev->name, upper_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005700 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5701 if (ret)
5702 goto rollback_lower_mesh;
5703 }
5704
Ido Schimmelb03804e2015-12-03 12:12:03 +01005705 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
5706 &changeupper_info.info);
5707 ret = notifier_to_errno(ret);
5708 if (ret)
5709 goto rollback_lower_mesh;
5710
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005711 return 0;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005712
5713rollback_lower_mesh:
5714 to_i = i;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005715 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005716 if (i == to_i)
5717 break;
5718 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5719 }
5720
5721 i = NULL;
5722
5723rollback_upper_mesh:
5724 to_i = i;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005725 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005726 if (i == to_i)
5727 break;
5728 __netdev_adjacent_dev_unlink(dev, i->dev);
5729 }
5730
5731 i = j = NULL;
5732
5733rollback_mesh:
5734 to_i = i;
5735 to_j = j;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005736 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5737 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005738 if (i == to_i && j == to_j)
5739 break;
5740 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5741 }
5742 if (i == to_i)
5743 break;
5744 }
5745
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005746 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005747
5748 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005749}
5750
5751/**
5752 * netdev_upper_dev_link - Add a link to the upper device
5753 * @dev: device
5754 * @upper_dev: new upper device
5755 *
5756 * Adds a link to device which is upper to this one. The caller must hold
5757 * the RTNL lock. On a failure a negative errno code is returned.
5758 * On success the reference counts are adjusted and the function
5759 * returns zero.
5760 */
5761int netdev_upper_dev_link(struct net_device *dev,
5762 struct net_device *upper_dev)
5763{
Jiri Pirko29bf24a2015-12-03 12:12:11 +01005764 return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005765}
5766EXPORT_SYMBOL(netdev_upper_dev_link);
5767
5768/**
5769 * netdev_master_upper_dev_link - Add a master link to the upper device
5770 * @dev: device
5771 * @upper_dev: new upper device
Jiri Pirko6dffb042015-12-03 12:12:10 +01005772 * @upper_priv: upper device private
Jiri Pirko29bf24a2015-12-03 12:12:11 +01005773 * @upper_info: upper info to be passed down via notifier
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005774 *
5775 * Adds a link to device which is upper to this one. In this case, only
5776 * one master upper device can be linked, although other non-master devices
5777 * might be linked as well. The caller must hold the RTNL lock.
5778 * On a failure a negative errno code is returned. On success the reference
5779 * counts are adjusted and the function returns zero.
5780 */
5781int netdev_master_upper_dev_link(struct net_device *dev,
Jiri Pirko6dffb042015-12-03 12:12:10 +01005782 struct net_device *upper_dev,
Jiri Pirko29bf24a2015-12-03 12:12:11 +01005783 void *upper_priv, void *upper_info)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005784{
Jiri Pirko29bf24a2015-12-03 12:12:11 +01005785 return __netdev_upper_dev_link(dev, upper_dev, true,
5786 upper_priv, upper_info);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005787}
5788EXPORT_SYMBOL(netdev_master_upper_dev_link);
5789
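/*
 * Illustrative sketch (assumed driver code, not from this file): a
 * bonding-style master enslaving a port and undoing the link on error.
 * "bond", "port" and "example_configure_port" are made-up names; the
 * private and info pointers are simply passed as NULL here.
 */
#if 0
static int example_enslave(struct net_device *bond, struct net_device *port)
{
	int err;

	ASSERT_RTNL();
	err = netdev_master_upper_dev_link(port, bond, NULL, NULL);
	if (err)
		return err;

	err = example_configure_port(port);
	if (err) {
		netdev_upper_dev_unlink(port, bond);
		return err;
	}
	return 0;
}
#endif
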
5790/**
5791 * netdev_upper_dev_unlink - Removes a link to upper device
5792 * @dev: device
5793 * @upper_dev: upper device to unlink
5794 *
5795 * Removes a link to device which is upper to this one. The caller must hold
5796 * the RTNL lock.
5797 */
5798void netdev_upper_dev_unlink(struct net_device *dev,
5799 struct net_device *upper_dev)
5800{
Jiri Pirko0e4ead92015-08-27 09:31:18 +02005801 struct netdev_notifier_changeupper_info changeupper_info;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005802 struct netdev_adjacent *i, *j;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005803 ASSERT_RTNL();
5804
Jiri Pirko0e4ead92015-08-27 09:31:18 +02005805 changeupper_info.upper_dev = upper_dev;
5806 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
5807 changeupper_info.linking = false;
5808
Jiri Pirko573c7ba2015-10-16 14:01:22 +02005809 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
5810 &changeupper_info.info);
5811
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005812 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005813
5814 /* Here is the tricky part. We must remove all dev's lower
5815 * devices from all upper_dev's upper devices and vice
5816 * versa, to maintain the graph relationship.
5817 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005818 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5819 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005820 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5821
5822	/* also remove the devices themselves from the lower/upper
5823	 * device lists
5824 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005825 list_for_each_entry(i, &dev->all_adj_list.lower, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005826 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5827
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005828 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005829 __netdev_adjacent_dev_unlink(dev, i->dev);
5830
Jiri Pirko0e4ead92015-08-27 09:31:18 +02005831 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
5832 &changeupper_info.info);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005833}
5834EXPORT_SYMBOL(netdev_upper_dev_unlink);
5835
Moni Shoua61bd3852015-02-03 16:48:29 +02005836/**
5837 * netdev_bonding_info_change - Dispatch event about slave change
5838 * @dev: device
Masanari Iida4a26e4532015-02-14 22:26:34 +09005839 * @bonding_info: info to dispatch
Moni Shoua61bd3852015-02-03 16:48:29 +02005840 *
5841 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5842 * The caller must hold the RTNL lock.
5843 */
5844void netdev_bonding_info_change(struct net_device *dev,
5845 struct netdev_bonding_info *bonding_info)
5846{
5847 struct netdev_notifier_bonding_info info;
5848
5849 memcpy(&info.bonding_info, bonding_info,
5850 sizeof(struct netdev_bonding_info));
5851 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
5852 &info.info);
5853}
5854EXPORT_SYMBOL(netdev_bonding_info_change);
5855
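/*
 * Illustrative sketch, not from the original source: a consumer of the
 * NETDEV_BONDING_INFO event dispatched above, registered through
 * register_netdevice_notifier().  Notifier callbacks recover the target
 * device with netdev_notifier_info_to_dev().
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_bonding_info *info = ptr;

	if (event == NETDEV_BONDING_INFO)
		netdev_info(dev, "bond mode %d\n",
			    info->bonding_info.master.bond_mode);
	return NOTIFY_DONE;
}
#endif
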
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08005856static void netdev_adjacent_add_links(struct net_device *dev)
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04005857{
5858 struct netdev_adjacent *iter;
5859
5860 struct net *net = dev_net(dev);
5861
5862 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5863		if (!net_eq(net, dev_net(iter->dev)))
5864 continue;
5865 netdev_adjacent_sysfs_add(iter->dev, dev,
5866 &iter->dev->adj_list.lower);
5867 netdev_adjacent_sysfs_add(dev, iter->dev,
5868 &dev->adj_list.upper);
5869 }
5870
5871 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5872		if (!net_eq(net, dev_net(iter->dev)))
5873 continue;
5874 netdev_adjacent_sysfs_add(iter->dev, dev,
5875 &iter->dev->adj_list.upper);
5876 netdev_adjacent_sysfs_add(dev, iter->dev,
5877 &dev->adj_list.lower);
5878 }
5879}
5880
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08005881static void netdev_adjacent_del_links(struct net_device *dev)
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04005882{
5883 struct netdev_adjacent *iter;
5884
5885 struct net *net = dev_net(dev);
5886
5887 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5888		if (!net_eq(net, dev_net(iter->dev)))
5889 continue;
5890 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5891 &iter->dev->adj_list.lower);
5892 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5893 &dev->adj_list.upper);
5894 }
5895
5896 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5897		if (!net_eq(net, dev_net(iter->dev)))
5898 continue;
5899 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5900 &iter->dev->adj_list.upper);
5901 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5902 &dev->adj_list.lower);
5903 }
5904}
5905
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005906void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
Veaceslav Falico402dae92013-09-25 09:20:09 +02005907{
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005908 struct netdev_adjacent *iter;
Veaceslav Falico402dae92013-09-25 09:20:09 +02005909
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04005910 struct net *net = dev_net(dev);
5911
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005912 list_for_each_entry(iter, &dev->adj_list.upper, list) {
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04005913		if (!net_eq(net, dev_net(iter->dev)))
5914 continue;
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005915 netdev_adjacent_sysfs_del(iter->dev, oldname,
5916 &iter->dev->adj_list.lower);
5917 netdev_adjacent_sysfs_add(iter->dev, dev,
5918 &iter->dev->adj_list.lower);
5919 }
Veaceslav Falico402dae92013-09-25 09:20:09 +02005920
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005921 list_for_each_entry(iter, &dev->adj_list.lower, list) {
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04005922		if (!net_eq(net, dev_net(iter->dev)))
5923 continue;
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005924 netdev_adjacent_sysfs_del(iter->dev, oldname,
5925 &iter->dev->adj_list.upper);
5926 netdev_adjacent_sysfs_add(iter->dev, dev,
5927 &iter->dev->adj_list.upper);
5928 }
Veaceslav Falico402dae92013-09-25 09:20:09 +02005929}
Veaceslav Falico402dae92013-09-25 09:20:09 +02005930
5931void *netdev_lower_dev_get_private(struct net_device *dev,
5932 struct net_device *lower_dev)
5933{
5934 struct netdev_adjacent *lower;
5935
5936 if (!lower_dev)
5937 return NULL;
Michal Kubeček6ea29da2015-09-24 10:59:05 +02005938 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
Veaceslav Falico402dae92013-09-25 09:20:09 +02005939 if (!lower)
5940 return NULL;
5941
5942 return lower->private;
5943}
5944EXPORT_SYMBOL(netdev_lower_dev_get_private);
5945
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04005946
5947int dev_get_nest_level(struct net_device *dev,
Jiri Pirkob618aaa2015-12-04 15:01:31 +01005948 bool (*type_check)(const struct net_device *dev))
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04005949{
5950 struct net_device *lower = NULL;
5951 struct list_head *iter;
5952 int max_nest = -1;
5953 int nest;
5954
5955 ASSERT_RTNL();
5956
5957 netdev_for_each_lower_dev(dev, lower, iter) {
5958 nest = dev_get_nest_level(lower, type_check);
5959 if (max_nest < nest)
5960 max_nest = nest;
5961 }
5962
5963 if (type_check(dev))
5964 max_nest++;
5965
5966 return max_nest;
5967}
5968EXPORT_SYMBOL(dev_get_nest_level);
5969
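/*
 * Illustrative sketch (not in the original file): computing the nesting
 * depth of a device for a given type, roughly how the vlan code uses
 * this helper for lockdep subclasses.  "example_is_vlan" is a stand-in
 * predicate.
 */
#if 0
static bool example_is_vlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_802_1Q_VLAN;
}

static int example_vlan_depth(struct net_device *dev)
{
	return dev_get_nest_level(dev, example_is_vlan);	/* under RTNL */
}
#endif
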
Jiri Pirko04d48262015-12-03 12:12:15 +01005970/**
5971 * netdev_lower_state_changed - Dispatch event about lower device state change
5972 * @lower_dev: device
5973 * @lower_state_info: state to dispatch
5974 *
5975 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
5976 * The caller must hold the RTNL lock.
5977 */
5978void netdev_lower_state_changed(struct net_device *lower_dev,
5979 void *lower_state_info)
5980{
5981 struct netdev_notifier_changelowerstate_info changelowerstate_info;
5982
5983 ASSERT_RTNL();
5984 changelowerstate_info.lower_state_info = lower_state_info;
5985 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
5986 &changelowerstate_info.info);
5987}
5988EXPORT_SYMBOL(netdev_lower_state_changed);
5989
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005990static void dev_change_rx_flags(struct net_device *dev, int flags)
5991{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005992 const struct net_device_ops *ops = dev->netdev_ops;
5993
Vlad Yasevichd2615bf2013-11-19 20:47:15 -05005994 if (ops->ndo_change_rx_flags)
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005995 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005996}
5997
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005998static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
Patrick McHardy4417da62007-06-27 01:28:10 -07005999{
Eric Dumazetb536db92011-11-30 21:42:26 +00006000 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06006001 kuid_t uid;
6002 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07006003
Patrick McHardy24023452007-07-14 18:51:31 -07006004 ASSERT_RTNL();
6005
Wang Chendad9b332008-06-18 01:48:28 -07006006 dev->flags |= IFF_PROMISC;
6007 dev->promiscuity += inc;
6008 if (dev->promiscuity == 0) {
6009 /*
6010 * Avoid overflow.
6011		 * If inc causes overflow, leave promiscuity untouched and return an error.
6012 */
6013 if (inc < 0)
6014 dev->flags &= ~IFF_PROMISC;
6015 else {
6016 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006017 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
6018 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07006019 return -EOVERFLOW;
6020 }
6021 }
Patrick McHardy4417da62007-06-27 01:28:10 -07006022 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006023 pr_info("device %s %s promiscuous mode\n",
6024 dev->name,
6025 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11006026 if (audit_enabled) {
6027 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05006028 audit_log(current->audit_context, GFP_ATOMIC,
6029 AUDIT_ANOM_PROMISCUOUS,
6030 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6031 dev->name, (dev->flags & IFF_PROMISC),
6032 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07006033 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06006034 from_kuid(&init_user_ns, uid),
6035 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05006036 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11006037 }
Patrick McHardy24023452007-07-14 18:51:31 -07006038
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006039 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07006040 }
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006041 if (notify)
6042 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
Wang Chendad9b332008-06-18 01:48:28 -07006043 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07006044}
6045
Linus Torvalds1da177e2005-04-16 15:20:36 -07006046/**
6047 * dev_set_promiscuity - update promiscuity count on a device
6048 * @dev: device
6049 * @inc: modifier
6050 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07006051 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006052 * remains above zero the interface remains promiscuous. Once it hits zero
6053 * the device reverts to normal filtering operation. A negative inc
6054 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07006055 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006056 */
Wang Chendad9b332008-06-18 01:48:28 -07006057int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006058{
Eric Dumazetb536db92011-11-30 21:42:26 +00006059 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07006060 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006061
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006062 err = __dev_set_promiscuity(dev, inc, true);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07006063 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07006064 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07006065 if (dev->flags != old_flags)
6066 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07006067 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006068}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006069EXPORT_SYMBOL(dev_set_promiscuity);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006070
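/*
 * Illustrative sketch, not from the original source: a packet-tap style
 * user taking and releasing a promiscuity reference under RTNL.  The
 * count-based API lets concurrent users stack safely; every +1 must
 * eventually be paired with a -1.
 */
#if 0
static int example_tap_attach(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);	/* caller holds RTNL */
}

static void example_tap_detach(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);		/* caller holds RTNL */
}
#endif
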
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006071static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006072{
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006073 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006074
Patrick McHardy24023452007-07-14 18:51:31 -07006075 ASSERT_RTNL();
6076
Linus Torvalds1da177e2005-04-16 15:20:36 -07006077 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07006078 dev->allmulti += inc;
6079 if (dev->allmulti == 0) {
6080 /*
6081 * Avoid overflow.
6082		 * If inc causes overflow, leave allmulti untouched and return an error.
6083 */
6084 if (inc < 0)
6085 dev->flags &= ~IFF_ALLMULTI;
6086 else {
6087 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006088 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
6089 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07006090 return -EOVERFLOW;
6091 }
6092 }
Patrick McHardy24023452007-07-14 18:51:31 -07006093 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006094 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07006095 dev_set_rx_mode(dev);
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006096 if (notify)
6097 __dev_notify_flags(dev, old_flags,
6098 dev->gflags ^ old_gflags);
Patrick McHardy24023452007-07-14 18:51:31 -07006099 }
Wang Chendad9b332008-06-18 01:48:28 -07006100 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07006101}
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006102
6103/**
6104 * dev_set_allmulti - update allmulti count on a device
6105 * @dev: device
6106 * @inc: modifier
6107 *
6108 * Add or remove reception of all multicast frames to a device. While the
6109 * count in the device remains above zero the interface remains listening
6110 * to all multicasts. Once it hits zero the device reverts to normal
6111 * filtering operation. A negative @inc value is used to drop the counter
6112 * when releasing a resource needing all multicasts.
6113 * Return 0 if successful or a negative errno code on error.
6114 */
6115
6116int dev_set_allmulti(struct net_device *dev, int inc)
6117{
6118 return __dev_set_allmulti(dev, inc, true);
6119}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006120EXPORT_SYMBOL(dev_set_allmulti);
Patrick McHardy4417da62007-06-27 01:28:10 -07006121
6122/*
6123 * Upload unicast and multicast address lists to device and
6124 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08006125 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07006126 * are present.
6127 */
6128void __dev_set_rx_mode(struct net_device *dev)
6129{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006130 const struct net_device_ops *ops = dev->netdev_ops;
6131
Patrick McHardy4417da62007-06-27 01:28:10 -07006132 /* dev_open will call this function so the list will stay sane. */
6133 if (!(dev->flags&IFF_UP))
6134 return;
6135
6136 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09006137 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07006138
Jiri Pirko01789342011-08-16 06:29:00 +00006139 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07006140 /* Unicast addresses changes may only happen under the rtnl,
6141 * therefore calling __dev_set_promiscuity here is safe.
6142 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08006143 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006144 __dev_set_promiscuity(dev, 1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07006145 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08006146 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006147 __dev_set_promiscuity(dev, -1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07006148 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07006149 }
Patrick McHardy4417da62007-06-27 01:28:10 -07006150 }
Jiri Pirko01789342011-08-16 06:29:00 +00006151
6152 if (ops->ndo_set_rx_mode)
6153 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07006154}
6155
6156void dev_set_rx_mode(struct net_device *dev)
6157{
David S. Millerb9e40852008-07-15 00:15:08 -07006158 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07006159 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07006160 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006161}
6162
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006163/**
6164 * dev_get_flags - get flags reported to userspace
6165 * @dev: device
6166 *
6167 * Get the combination of flag bits exported through APIs to userspace.
6168 */
Eric Dumazet95c96172012-04-15 05:58:06 +00006169unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006170{
Eric Dumazet95c96172012-04-15 05:58:06 +00006171 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006172
6173 flags = (dev->flags & ~(IFF_PROMISC |
6174 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08006175 IFF_RUNNING |
6176 IFF_LOWER_UP |
6177 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07006178 (dev->gflags & (IFF_PROMISC |
6179 IFF_ALLMULTI));
6180
Stefan Rompfb00055a2006-03-20 17:09:11 -08006181 if (netif_running(dev)) {
6182 if (netif_oper_up(dev))
6183 flags |= IFF_RUNNING;
6184 if (netif_carrier_ok(dev))
6185 flags |= IFF_LOWER_UP;
6186 if (netif_dormant(dev))
6187 flags |= IFF_DORMANT;
6188 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006189
6190 return flags;
6191}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006192EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006193
Patrick McHardybd380812010-02-26 06:34:53 +00006194int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006195{
Eric Dumazetb536db92011-11-30 21:42:26 +00006196 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00006197 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006198
Patrick McHardy24023452007-07-14 18:51:31 -07006199 ASSERT_RTNL();
6200
Linus Torvalds1da177e2005-04-16 15:20:36 -07006201 /*
6202 * Set the flags on our device.
6203 */
6204
6205 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6206 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6207 IFF_AUTOMEDIA)) |
6208 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6209 IFF_ALLMULTI));
6210
6211 /*
6212 * Load in the correct multicast list now the flags have changed.
6213 */
6214
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006215 if ((old_flags ^ flags) & IFF_MULTICAST)
6216 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07006217
Patrick McHardy4417da62007-06-27 01:28:10 -07006218 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006219
6220 /*
6221	 *	Have we downed the interface? We handle IFF_UP ourselves
6222 * according to user attempts to set it, rather than blindly
6223 * setting it.
6224 */
6225
6226 ret = 0;
Peter Pan(潘卫平)d215d102014-06-16 21:57:22 +08006227 if ((old_flags ^ flags) & IFF_UP)
Patrick McHardybd380812010-02-26 06:34:53 +00006228 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006229
Linus Torvalds1da177e2005-04-16 15:20:36 -07006230 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006231 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006232 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006233
Linus Torvalds1da177e2005-04-16 15:20:36 -07006234 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006235
6236 if (__dev_set_promiscuity(dev, inc, false) >= 0)
6237 if (dev->flags != old_flags)
6238 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006239 }
6240
6241 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
6242	   is important. Some (broken) drivers set IFF_PROMISC when
6243	   IFF_ALLMULTI is requested, without asking us and without reporting.
6244 */
6245 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006246 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6247
Linus Torvalds1da177e2005-04-16 15:20:36 -07006248 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006249 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006250 }
6251
Patrick McHardybd380812010-02-26 06:34:53 +00006252 return ret;
6253}
6254
Nicolas Dichtela528c212013-09-25 12:02:44 +02006255void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6256 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00006257{
6258 unsigned int changes = dev->flags ^ old_flags;
6259
Nicolas Dichtela528c212013-09-25 12:02:44 +02006260 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006261 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02006262
Patrick McHardybd380812010-02-26 06:34:53 +00006263 if (changes & IFF_UP) {
6264 if (dev->flags & IFF_UP)
6265 call_netdevice_notifiers(NETDEV_UP, dev);
6266 else
6267 call_netdevice_notifiers(NETDEV_DOWN, dev);
6268 }
6269
6270 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00006271 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
6272 struct netdev_notifier_change_info change_info;
6273
6274 change_info.flags_changed = changes;
6275 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
6276 &change_info.info);
6277 }
Patrick McHardybd380812010-02-26 06:34:53 +00006278}
6279
6280/**
6281 * dev_change_flags - change device settings
6282 * @dev: device
6283 * @flags: device state flags
6284 *
6285 * Change settings on device based state flags. The flags are
6286 * in the userspace exported format.
6287 */
Eric Dumazetb536db92011-11-30 21:42:26 +00006288int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00006289{
Eric Dumazetb536db92011-11-30 21:42:26 +00006290 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006291 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00006292
6293 ret = __dev_change_flags(dev, flags);
6294 if (ret < 0)
6295 return ret;
6296
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006297 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02006298 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006299 return ret;
6300}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006301EXPORT_SYMBOL(dev_change_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006302
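/*
 * Illustrative sketch (not in the original file): bringing an interface
 * administratively up the way the SIOCSIFFLAGS path does, by feeding the
 * userspace-format flags from dev_get_flags() back into
 * dev_change_flags() under RTNL.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif
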
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006303static int __dev_set_mtu(struct net_device *dev, int new_mtu)
6304{
6305 const struct net_device_ops *ops = dev->netdev_ops;
6306
6307 if (ops->ndo_change_mtu)
6308 return ops->ndo_change_mtu(dev, new_mtu);
6309
6310 dev->mtu = new_mtu;
6311 return 0;
6312}
6313
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006314/**
6315 * dev_set_mtu - Change maximum transfer unit
6316 * @dev: device
6317 * @new_mtu: new transfer unit
6318 *
6319 * Change the maximum transfer size of the network device.
6320 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006321int dev_set_mtu(struct net_device *dev, int new_mtu)
6322{
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006323 int err, orig_mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006324
6325 if (new_mtu == dev->mtu)
6326 return 0;
6327
6328	/* MTU must not be negative. */
6329 if (new_mtu < 0)
6330 return -EINVAL;
6331
6332 if (!netif_device_present(dev))
6333 return -ENODEV;
6334
Veaceslav Falico1d486bf2014-01-16 00:02:18 +01006335 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6336 err = notifier_to_errno(err);
6337 if (err)
6338 return err;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006339
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006340 orig_mtu = dev->mtu;
6341 err = __dev_set_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006342
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006343 if (!err) {
6344 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6345 err = notifier_to_errno(err);
6346 if (err) {
6347 /* setting mtu back and notifying everyone again,
6348 * so that they have a chance to revert changes.
6349 */
6350 __dev_set_mtu(dev, orig_mtu);
6351 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6352 }
6353 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006354 return err;
6355}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006356EXPORT_SYMBOL(dev_set_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006357
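/*
 * Illustrative sketch, not from the original source: callers change the
 * MTU under RTNL and must be prepared for either the driver or the
 * NETDEV_PRECHANGEMTU notifier chain to veto the new value.
 */
#if 0
static int example_set_jumbo_mtu(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);	/* may fail, e.g. with a notifier veto */
	rtnl_unlock();
	return err;
}
#endif
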
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006358/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006359 * dev_set_group - Change group this device belongs to
6360 * @dev: device
6361 * @new_group: group this device should belong to
6362 */
6363void dev_set_group(struct net_device *dev, int new_group)
6364{
6365 dev->group = new_group;
6366}
6367EXPORT_SYMBOL(dev_set_group);
6368
6369/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006370 * dev_set_mac_address - Change Media Access Control Address
6371 * @dev: device
6372 * @sa: new address
6373 *
6374 * Change the hardware (MAC) address of the device
6375 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006376int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6377{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006378 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006379 int err;
6380
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006381 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006382 return -EOPNOTSUPP;
6383 if (sa->sa_family != dev->type)
6384 return -EINVAL;
6385 if (!netif_device_present(dev))
6386 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006387 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00006388 if (err)
6389 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00006390 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00006391 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006392 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00006393 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006394}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006395EXPORT_SYMBOL(dev_set_mac_address);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006396
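/*
 * Illustrative sketch (not in the original file): setting a new hardware
 * address.  The sockaddr family must match dev->type and dev->addr_len
 * bytes of address are consumed; the example assumes an Ethernet device
 * (ETH_ALEN bytes) and an RTNL-holding caller.
 */
#if 0
static int example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;		/* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, mac, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}
#endif
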
Jiri Pirko4bf84c32012-12-27 23:49:37 +00006397/**
6398 * dev_change_carrier - Change device carrier
6399 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00006400 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00006401 *
6402 * Change device carrier
6403 */
6404int dev_change_carrier(struct net_device *dev, bool new_carrier)
6405{
6406 const struct net_device_ops *ops = dev->netdev_ops;
6407
6408 if (!ops->ndo_change_carrier)
6409 return -EOPNOTSUPP;
6410 if (!netif_device_present(dev))
6411 return -ENODEV;
6412 return ops->ndo_change_carrier(dev, new_carrier);
6413}
6414EXPORT_SYMBOL(dev_change_carrier);
6415
Linus Torvalds1da177e2005-04-16 15:20:36 -07006416/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02006417 * dev_get_phys_port_id - Get device physical port ID
6418 * @dev: device
6419 * @ppid: port ID
6420 *
6421 * Get device physical port ID
6422 */
6423int dev_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01006424 struct netdev_phys_item_id *ppid)
Jiri Pirko66b52b02013-07-29 18:16:49 +02006425{
6426 const struct net_device_ops *ops = dev->netdev_ops;
6427
6428 if (!ops->ndo_get_phys_port_id)
6429 return -EOPNOTSUPP;
6430 return ops->ndo_get_phys_port_id(dev, ppid);
6431}
6432EXPORT_SYMBOL(dev_get_phys_port_id);
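/*
 * Illustrative sketch (assumption): comparing the physical port IDs of two
 * netdevs, e.g. to decide whether they share a NIC port. Drivers without
 * ndo_get_phys_port_id make dev_get_phys_port_id() return -EOPNOTSUPP.
 */
static bool __maybe_unused example_same_phys_port(struct net_device *a,
                                                  struct net_device *b)
{
        struct netdev_phys_item_id pa, pb;

        if (dev_get_phys_port_id(a, &pa) || dev_get_phys_port_id(b, &pb))
                return false;
        return pa.id_len == pb.id_len && !memcmp(pa.id, pb.id, pa.id_len);
}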
6433
6434/**
David Aherndb24a902015-03-17 20:23:15 -06006435 * dev_get_phys_port_name - Get device physical port name
6436 * @dev: device
6437 * @name: port name
6438 *
6439 * Get device physical port name
6440 */
6441int dev_get_phys_port_name(struct net_device *dev,
6442 char *name, size_t len)
6443{
6444 const struct net_device_ops *ops = dev->netdev_ops;
6445
6446 if (!ops->ndo_get_phys_port_name)
6447 return -EOPNOTSUPP;
6448 return ops->ndo_get_phys_port_name(dev, name, len);
6449}
6450EXPORT_SYMBOL(dev_get_phys_port_name);
6451
6452/**
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07006453 * dev_change_proto_down - update protocol port state information
6454 * @dev: device
6455 * @proto_down: new value
6456 *
6457 * This info can be used by switch drivers to set the phys state of the
6458 * port.
6459 */
6460int dev_change_proto_down(struct net_device *dev, bool proto_down)
6461{
6462 const struct net_device_ops *ops = dev->netdev_ops;
6463
6464 if (!ops->ndo_change_proto_down)
6465 return -EOPNOTSUPP;
6466 if (!netif_device_present(dev))
6467 return -ENODEV;
6468 return ops->ndo_change_proto_down(dev, proto_down);
6469}
6470EXPORT_SYMBOL(dev_change_proto_down);
6471
6472/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006473 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07006474 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07006475 *
6476 * Returns a suitable unique value for a new device interface
6477 * number. The caller must hold the rtnl semaphore or the
6478 * dev_base_lock to be sure it remains unique.
6479 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07006480static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006481{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00006482 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006483 for (;;) {
6484 if (++ifindex <= 0)
6485 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006486 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00006487 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006488 }
6489}
6490
Linus Torvalds1da177e2005-04-16 15:20:36 -07006491/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08006492static LIST_HEAD(net_todo_list);
Cong Wang200b9162014-05-12 15:11:20 -07006493DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006494
Stephen Hemminger6f05f622007-03-08 20:46:03 -08006495static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006496{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006497 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07006498 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006499}
6500
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006501static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006502{
Krishna Kumare93737b2009-12-08 22:26:02 +00006503 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006504 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006505
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006506 BUG_ON(dev_boot_phase);
6507 ASSERT_RTNL();
6508
Krishna Kumare93737b2009-12-08 22:26:02 +00006509 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006510 /* Some devices get unregistered without ever having
Krishna Kumare93737b2009-12-08 22:26:02 +00006511 * been registered, to unwind a failed initialization.
 6512 * Remove those devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006513 */
6514 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006515 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6516 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006517
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006518 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00006519 list_del(&dev->unreg_list);
6520 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006521 }
Eric Dumazet449f4542011-05-19 12:24:16 +00006522 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006523 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00006524 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006525
Octavian Purdila44345722010-12-13 12:44:07 +00006526 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006527 list_for_each_entry(dev, head, unreg_list)
6528 list_add_tail(&dev->close_list, &close_head);
David S. Miller99c4a262015-03-18 22:52:33 -04006529 dev_close_many(&close_head, true);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006530
Octavian Purdila44345722010-12-13 12:44:07 +00006531 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006532 /* And unlink it from device chain. */
6533 unlist_netdevice(dev);
6534
6535 dev->reg_state = NETREG_UNREGISTERING;
Julian Anastasove9e4dd32015-07-09 09:59:09 +03006536 on_each_cpu(flush_backlog, dev, 1);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006537 }
6538
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006539 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006540
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006541 list_for_each_entry(dev, head, unreg_list) {
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006542 struct sk_buff *skb = NULL;
6543
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006544 /* Shutdown queueing discipline. */
6545 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006546
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006547
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006548 /* Notify protocols that we are about to destroy
 6549 this device. They should clean up all their state.
6550 */
6551 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6552
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006553 if (!dev->rtnl_link_ops ||
6554 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6555 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6556 GFP_KERNEL);
6557
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006558 /*
6559 * Flush the unicast and multicast chains
6560 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006561 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006562 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006563
6564 if (dev->netdev_ops->ndo_uninit)
6565 dev->netdev_ops->ndo_uninit(dev);
6566
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006567 if (skb)
6568 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
Roopa Prabhu56bfa7e2014-05-01 11:40:30 -07006569
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006570 /* Notifier chain MUST detach us all upper devices. */
6571 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006572
6573 /* Remove entries from kobject tree */
6574 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00006575#ifdef CONFIG_XPS
6576 /* Remove XPS queueing entries */
6577 netif_reset_xps_queues_gt(dev, 0);
6578#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006579 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006580
Eric W. Biederman850a5452011-10-13 22:25:23 +00006581 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006582
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00006583 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006584 dev_put(dev);
6585}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006586
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006587static void rollback_registered(struct net_device *dev)
6588{
6589 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006590
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006591 list_add(&dev->unreg_list, &single);
6592 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00006593 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006594}
6595
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006596static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
6597 struct net_device *upper, netdev_features_t features)
6598{
6599 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6600 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05006601 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006602
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05006603 for_each_netdev_feature(&upper_disables, feature_bit) {
6604 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006605 if (!(upper->wanted_features & feature)
6606 && (features & feature)) {
6607 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
6608 &feature, upper->name);
6609 features &= ~feature;
6610 }
6611 }
6612
6613 return features;
6614}
6615
6616static void netdev_sync_lower_features(struct net_device *upper,
6617 struct net_device *lower, netdev_features_t features)
6618{
6619 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6620 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05006621 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006622
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05006623 for_each_netdev_feature(&upper_disables, feature_bit) {
6624 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006625 if (!(features & feature) && (lower->features & feature)) {
6626 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
6627 &feature, lower->name);
6628 lower->wanted_features &= ~feature;
6629 netdev_update_features(lower);
6630
6631 if (unlikely(lower->features & feature))
6632 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
6633 &feature, lower->name);
6634 }
6635 }
6636}
6637
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006638static netdev_features_t netdev_fix_features(struct net_device *dev,
6639 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07006640{
Michał Mirosław57422dc2011-01-22 12:14:12 +00006641 /* Fix illegal checksum combinations */
6642 if ((features & NETIF_F_HW_CSUM) &&
6643 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006644 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00006645 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6646 }
6647
Herbert Xub63365a2008-10-23 01:11:29 -07006648 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00006649 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006650 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00006651 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07006652 }
6653
Pravin B Shelarec5f0612013-03-07 09:28:01 +00006654 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6655 !(features & NETIF_F_IP_CSUM)) {
6656 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6657 features &= ~NETIF_F_TSO;
6658 features &= ~NETIF_F_TSO_ECN;
6659 }
6660
6661 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6662 !(features & NETIF_F_IPV6_CSUM)) {
6663 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6664 features &= ~NETIF_F_TSO6;
6665 }
6666
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00006667 /* TSO ECN requires that TSO is present as well. */
6668 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6669 features &= ~NETIF_F_TSO_ECN;
6670
Michał Mirosław212b5732011-02-15 16:59:16 +00006671 /* Software GSO depends on SG. */
6672 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006673 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00006674 features &= ~NETIF_F_GSO;
6675 }
6676
Michał Mirosławacd11302011-01-24 15:45:15 -08006677 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07006678 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00006679 /* maybe split UFO into V4 and V6? */
Tom Herbertc8cd0982015-12-14 11:19:44 -08006680 if (!(features & NETIF_F_HW_CSUM) &&
6681 ((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
6682 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006683 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08006684 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07006685 features &= ~NETIF_F_UFO;
6686 }
6687
6688 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006689 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08006690 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07006691 features &= ~NETIF_F_UFO;
6692 }
6693 }
6694
Jiri Pirkod0290212014-04-02 23:09:31 +02006695#ifdef CONFIG_NET_RX_BUSY_POLL
6696 if (dev->netdev_ops->ndo_busy_poll)
6697 features |= NETIF_F_BUSY_POLL;
6698 else
6699#endif
6700 features &= ~NETIF_F_BUSY_POLL;
6701
Herbert Xub63365a2008-10-23 01:11:29 -07006702 return features;
6703}
Herbert Xub63365a2008-10-23 01:11:29 -07006704
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006705int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00006706{
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006707 struct net_device *upper, *lower;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006708 netdev_features_t features;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006709 struct list_head *iter;
Jarod Wilsone7868a82015-11-03 23:09:32 -05006710 int err = -1;
Michał Mirosław5455c692011-02-15 16:59:17 +00006711
Michał Mirosław87267482011-04-12 09:56:38 +00006712 ASSERT_RTNL();
6713
Michał Mirosław5455c692011-02-15 16:59:17 +00006714 features = netdev_get_wanted_features(dev);
6715
6716 if (dev->netdev_ops->ndo_fix_features)
6717 features = dev->netdev_ops->ndo_fix_features(dev, features);
6718
6719 /* driver might be less strict about feature dependencies */
6720 features = netdev_fix_features(dev, features);
6721
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006722 /* some features can't be enabled if they're off on an upper device */
6723 netdev_for_each_upper_dev_rcu(dev, upper, iter)
6724 features = netdev_sync_upper_features(dev, upper, features);
6725
Michał Mirosław5455c692011-02-15 16:59:17 +00006726 if (dev->features == features)
Jarod Wilsone7868a82015-11-03 23:09:32 -05006727 goto sync_lower;
Michał Mirosław5455c692011-02-15 16:59:17 +00006728
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006729 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6730 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00006731
6732 if (dev->netdev_ops->ndo_set_features)
6733 err = dev->netdev_ops->ndo_set_features(dev, features);
Nikolay Aleksandrov5f8dc332015-11-13 14:54:01 +01006734 else
6735 err = 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00006736
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006737 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00006738 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006739 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6740 err, &features, &dev->features);
Nikolay Aleksandrov17b85d22015-11-17 15:49:06 +01006741 /* return non-0 since some features might have changed and
6742 * it's better to fire a spurious notification than miss it
6743 */
6744 return -1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006745 }
6746
Jarod Wilsone7868a82015-11-03 23:09:32 -05006747sync_lower:
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006748 /* some features must be disabled on lower devices when disabled
6749 * on an upper device (think: bonding master or bridge)
6750 */
6751 netdev_for_each_lower_dev(dev, lower, iter)
6752 netdev_sync_lower_features(dev, lower, features);
6753
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006754 if (!err)
6755 dev->features = features;
6756
Jarod Wilsone7868a82015-11-03 23:09:32 -05006757 return err < 0 ? 0 : 1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006758}
6759
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006760/**
6761 * netdev_update_features - recalculate device features
6762 * @dev: the device to check
6763 *
6764 * Recalculate dev->features set and send notifications if it
6765 * has changed. Should be called after driver or hardware dependent
6766 * conditions might have changed that influence the features.
6767 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006768void netdev_update_features(struct net_device *dev)
6769{
6770 if (__netdev_update_features(dev))
6771 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00006772}
6773EXPORT_SYMBOL(netdev_update_features);
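/*
 * Illustrative sketch (assumption): toggling an offload under RTNL and
 * letting the core recompute dev->features through the
 * ndo_fix_features/ndo_set_features pipeline above. Manipulating
 * wanted_features directly mirrors what the ethtool path does.
 */
static void __maybe_unused example_toggle_rxcsum(struct net_device *dev,
                                                 bool enable)
{
        ASSERT_RTNL();

        if (enable)
                dev->wanted_features |= NETIF_F_RXCSUM;
        else
                dev->wanted_features &= ~NETIF_F_RXCSUM;

        netdev_update_features(dev);    /* notifies only if something changed */
}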
6774
Linus Torvalds1da177e2005-04-16 15:20:36 -07006775/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006776 * netdev_change_features - recalculate device features
6777 * @dev: the device to check
6778 *
6779 * Recalculate dev->features set and send notifications even
6780 * if they have not changed. Should be called instead of
6781 * netdev_update_features() if also dev->vlan_features might
6782 * have changed to allow the changes to be propagated to stacked
6783 * VLAN devices.
6784 */
6785void netdev_change_features(struct net_device *dev)
6786{
6787 __netdev_update_features(dev);
6788 netdev_features_change(dev);
6789}
6790EXPORT_SYMBOL(netdev_change_features);
6791
6792/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006793 * netif_stacked_transfer_operstate - transfer operstate
6794 * @rootdev: the root or lower level device to transfer state from
6795 * @dev: the device to transfer operstate to
6796 *
6797 * Transfer operational state from root to device. This is normally
6798 * called when a stacking relationship exists between the root
 6799 * device and the device (a leaf device).
6800 */
6801void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6802 struct net_device *dev)
6803{
6804 if (rootdev->operstate == IF_OPER_DORMANT)
6805 netif_dormant_on(dev);
6806 else
6807 netif_dormant_off(dev);
6808
6809 if (netif_carrier_ok(rootdev)) {
6810 if (!netif_carrier_ok(dev))
6811 netif_carrier_on(dev);
6812 } else {
6813 if (netif_carrier_ok(dev))
6814 netif_carrier_off(dev);
6815 }
6816}
6817EXPORT_SYMBOL(netif_stacked_transfer_operstate);
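/*
 * Illustrative sketch (assumption): a stacking driver (VLAN/bond-like)
 * mirroring a lower device's state into its virtual upper device whenever
 * the lower device reports a link event.
 */
static int __maybe_unused example_lower_event(struct net_device *lower,
                                              struct net_device *upper,
                                              unsigned long event)
{
        if (event == NETDEV_CHANGE || event == NETDEV_UP)
                netif_stacked_transfer_operstate(lower, upper);
        return NOTIFY_DONE;
}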
6818
Michael Daltona953be52014-01-16 22:23:28 -08006819#ifdef CONFIG_SYSFS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006820static int netif_alloc_rx_queues(struct net_device *dev)
6821{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006822 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00006823 struct netdev_rx_queue *rx;
Pankaj Gupta10595902015-01-12 11:41:28 +05306824 size_t sz = count * sizeof(*rx);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006825
Tom Herbertbd25fa72010-10-18 18:00:16 +00006826 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006827
Pankaj Gupta10595902015-01-12 11:41:28 +05306828 rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6829 if (!rx) {
6830 rx = vzalloc(sz);
6831 if (!rx)
6832 return -ENOMEM;
6833 }
Tom Herbertbd25fa72010-10-18 18:00:16 +00006834 dev->_rx = rx;
6835
Tom Herbertbd25fa72010-10-18 18:00:16 +00006836 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00006837 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006838 return 0;
6839}
Tom Herbertbf264142010-11-26 08:36:09 +00006840#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006841
Changli Gaoaa942102010-12-04 02:31:41 +00006842static void netdev_init_one_queue(struct net_device *dev,
6843 struct netdev_queue *queue, void *_unused)
6844{
6845 /* Initialize queue lock */
6846 spin_lock_init(&queue->_xmit_lock);
6847 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6848 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00006849 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00006850 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00006851#ifdef CONFIG_BQL
6852 dql_init(&queue->dql, HZ);
6853#endif
Changli Gaoaa942102010-12-04 02:31:41 +00006854}
6855
Eric Dumazet60877a32013-06-20 01:15:51 -07006856static void netif_free_tx_queues(struct net_device *dev)
6857{
WANG Cong4cb28972014-06-02 15:55:22 -07006858 kvfree(dev->_tx);
Eric Dumazet60877a32013-06-20 01:15:51 -07006859}
6860
Tom Herberte6484932010-10-18 18:04:39 +00006861static int netif_alloc_netdev_queues(struct net_device *dev)
6862{
6863 unsigned int count = dev->num_tx_queues;
6864 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07006865 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00006866
Eric Dumazetd3397272015-07-06 17:13:26 +02006867 if (count < 1 || count > 0xffff)
6868 return -EINVAL;
Tom Herberte6484932010-10-18 18:04:39 +00006869
Eric Dumazet60877a32013-06-20 01:15:51 -07006870 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6871 if (!tx) {
6872 tx = vzalloc(sz);
6873 if (!tx)
6874 return -ENOMEM;
6875 }
Tom Herberte6484932010-10-18 18:04:39 +00006876 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00006877
Tom Herberte6484932010-10-18 18:04:39 +00006878 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6879 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00006880
6881 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00006882}
6883
Denys Vlasenkoa2029242015-05-11 21:17:53 +02006884void netif_tx_stop_all_queues(struct net_device *dev)
6885{
6886 unsigned int i;
6887
6888 for (i = 0; i < dev->num_tx_queues; i++) {
6889 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
6890 netif_tx_stop_queue(txq);
6891 }
6892}
6893EXPORT_SYMBOL(netif_tx_stop_all_queues);
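/*
 * Illustrative sketch (assumption): a multiqueue driver quiescing TX in its
 * ndo_stop() before tearing down hardware rings, so no new skbs reach
 * ndo_start_xmit() while the rings disappear.
 */
static int __maybe_unused example_ndo_stop(struct net_device *dev)
{
        netif_tx_stop_all_queues(dev);
        /* ...a real driver would now mask IRQs and free its TX rings... */
        return 0;
}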
6894
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006895/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006896 * register_netdevice - register a network device
6897 * @dev: device to register
6898 *
6899 * Take a completed network device structure and add it to the kernel
6900 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6901 * chain. 0 is returned on success. A negative errno code is returned
6902 * on a failure to set up the device, or if the name is a duplicate.
6903 *
6904 * Callers must hold the rtnl semaphore. You may want
6905 * register_netdev() instead of this.
6906 *
6907 * BUGS:
6908 * The locking appears insufficient to guarantee two parallel registers
6909 * will not get the same name.
6910 */
6911
6912int register_netdevice(struct net_device *dev)
6913{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006914 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006915 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006916
6917 BUG_ON(dev_boot_phase);
6918 ASSERT_RTNL();
6919
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006920 might_sleep();
6921
Linus Torvalds1da177e2005-04-16 15:20:36 -07006922 /* When net_device's are persistent, this will be fatal. */
6923 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006924 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006925
David S. Millerf1f28aa2008-07-15 00:08:33 -07006926 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07006927 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006928
Gao feng828de4f2012-09-13 20:58:27 +00006929 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00006930 if (ret < 0)
6931 goto out;
6932
Linus Torvalds1da177e2005-04-16 15:20:36 -07006933 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006934 if (dev->netdev_ops->ndo_init) {
6935 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006936 if (ret) {
6937 if (ret > 0)
6938 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08006939 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006940 }
6941 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006942
Patrick McHardyf6469682013-04-19 02:04:27 +00006943 if (((dev->hw_features | dev->features) &
6944 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00006945 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6946 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6947 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6948 ret = -EINVAL;
6949 goto err_uninit;
6950 }
6951
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00006952 ret = -EBUSY;
6953 if (!dev->ifindex)
6954 dev->ifindex = dev_new_index(net);
6955 else if (__dev_get_by_index(net, dev->ifindex))
6956 goto err_uninit;
6957
Michał Mirosław5455c692011-02-15 16:59:17 +00006958 /* Transfer changeable features to wanted_features and enable
6959 * software offloads (GSO and GRO).
6960 */
6961 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00006962 dev->features |= NETIF_F_SOFT_FEATURES;
6963 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006964
Michał Mirosław34324dc2011-11-15 15:29:55 +00006965 if (!(dev->flags & IFF_LOOPBACK)) {
6966 dev->hw_features |= NETIF_F_NOCACHE_COPY;
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006967 }
6968
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006969 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00006970 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006971 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00006972
Pravin B Shelaree579672013-03-07 09:28:08 +00006973 /* Make NETIF_F_SG inheritable to tunnel devices.
6974 */
6975 dev->hw_enc_features |= NETIF_F_SG;
6976
Simon Horman0d89d202013-05-23 21:02:52 +00006977 /* Make NETIF_F_SG inheritable to MPLS.
6978 */
6979 dev->mpls_features |= NETIF_F_SG;
6980
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00006981 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6982 ret = notifier_to_errno(ret);
6983 if (ret)
6984 goto err_uninit;
6985
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006986 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006987 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006988 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006989 dev->reg_state = NETREG_REGISTERED;
6990
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006991 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00006992
Linus Torvalds1da177e2005-04-16 15:20:36 -07006993 /*
6994 * Default initial state at registry is that the
6995 * device is present.
6996 */
6997
6998 set_bit(__LINK_STATE_PRESENT, &dev->state);
6999
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01007000 linkwatch_init_dev(dev);
7001
Linus Torvalds1da177e2005-04-16 15:20:36 -07007002 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007003 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007004 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04007005 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007006
Jiri Pirko948b3372013-01-08 01:38:25 +00007007 /* If the device has a permanent device address, the driver should
 7008 * set dev_addr, and addr_assign_type should be left at
 7009 * NET_ADDR_PERM (the default value).
7010 */
7011 if (dev->addr_assign_type == NET_ADDR_PERM)
7012 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7013
Linus Torvalds1da177e2005-04-16 15:20:36 -07007014 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007015 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07007016 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007017 if (ret) {
7018 rollback_registered(dev);
7019 dev->reg_state = NETREG_UNREGISTERED;
7020 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007021 /*
7022 * Prevent userspace races by waiting until the network
 7023 * device is fully set up before sending notifications.
7024 */
Patrick McHardya2835762010-02-26 06:34:51 +00007025 if (!dev->rtnl_link_ops ||
7026 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007027 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007028
7029out:
7030 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007031
7032err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007033 if (dev->netdev_ops->ndo_uninit)
7034 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007035 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007036}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007037EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007038
7039/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007040 * init_dummy_netdev - init a dummy network device for NAPI
7041 * @dev: device to init
7042 *
 7043 * This takes a network device structure and initializes the minimum
 7044 * number of fields so it can be used to schedule NAPI polls without
 7045 * registering a full-blown interface. This is to be used by drivers
7046 * that need to tie several hardware interfaces to a single NAPI
7047 * poll scheduler due to HW limitations.
7048 */
7049int init_dummy_netdev(struct net_device *dev)
7050{
7051 /* Clear everything. Note we don't initialize spinlocks
 7052 * as they aren't supposed to be taken by any of the
7053 * NAPI code and this dummy netdev is supposed to be
7054 * only ever used for NAPI polls
7055 */
7056 memset(dev, 0, sizeof(struct net_device));
7057
7058 /* make sure we BUG if trying to hit standard
7059 * register/unregister code path
7060 */
7061 dev->reg_state = NETREG_DUMMY;
7062
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007063 /* NAPI wants this */
7064 INIT_LIST_HEAD(&dev->napi_list);
7065
7066 /* a dummy interface is started by default */
7067 set_bit(__LINK_STATE_PRESENT, &dev->state);
7068 set_bit(__LINK_STATE_START, &dev->state);
7069
Eric Dumazet29b44332010-10-11 10:22:12 +00007070 /* Note: We don't allocate pcpu_refcnt for dummy devices,
 7071 * because users of this 'device' don't need to change
7072 * its refcount.
7073 */
7074
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007075 return 0;
7076}
7077EXPORT_SYMBOL_GPL(init_dummy_netdev);
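/*
 * Illustrative sketch (assumption): the pattern init_dummy_netdev() exists
 * for. The adapter struct and poll callback are made-up names; the dummy
 * netdev is never registered, it only anchors the NAPI context.
 */
struct example_adapter {
        struct net_device napi_dev;     /* dummy, never registered */
        struct napi_struct napi;
};

static void __maybe_unused example_adapter_setup(struct example_adapter *ad,
                int (*poll)(struct napi_struct *, int))
{
        init_dummy_netdev(&ad->napi_dev);
        netif_napi_add(&ad->napi_dev, &ad->napi, poll, NAPI_POLL_WEIGHT);
        napi_enable(&ad->napi);
}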
7078
7079
7080/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007081 * register_netdev - register a network device
7082 * @dev: device to register
7083 *
7084 * Take a completed network device structure and add it to the kernel
7085 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7086 * chain. 0 is returned on success. A negative errno code is returned
7087 * on a failure to set up the device, or if the name is a duplicate.
7088 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07007089 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07007090 * and expands the device name if you passed a format string to
7091 * alloc_netdev.
7092 */
7093int register_netdev(struct net_device *dev)
7094{
7095 int err;
7096
7097 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007098 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007099 rtnl_unlock();
7100 return err;
7101}
7102EXPORT_SYMBOL(register_netdev);
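/*
 * Illustrative sketch (assumption): the usual allocate/register pairing.
 * The "example%d" name template and ether_setup() are choices made for the
 * example, not requirements.
 */
static struct net_device * __maybe_unused example_create_netdev(void)
{
        struct net_device *dev;
        int err;

        dev = alloc_netdev(0, "example%d", NET_NAME_UNKNOWN, ether_setup);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        err = register_netdev(dev);     /* takes rtnl and expands "%d" */
        if (err) {
                free_netdev(dev);
                return ERR_PTR(err);
        }
        return dev;
}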
7103
Eric Dumazet29b44332010-10-11 10:22:12 +00007104int netdev_refcnt_read(const struct net_device *dev)
7105{
7106 int i, refcnt = 0;
7107
7108 for_each_possible_cpu(i)
7109 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
7110 return refcnt;
7111}
7112EXPORT_SYMBOL(netdev_refcnt_read);
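/*
 * Illustrative sketch (assumption): a debug helper reading the summed
 * per-cpu refcount, e.g. while chasing the "waiting for %s to become free"
 * message printed by netdev_wait_allrefs() below.
 */
static void __maybe_unused example_report_refs(struct net_device *dev)
{
        int refs = netdev_refcnt_read(dev);

        if (refs)
                netdev_warn(dev, "%d reference(s) still held\n", refs);
}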
7113
Ben Hutchings2c530402012-07-10 10:55:09 +00007114/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007115 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00007116 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07007117 *
7118 * This is called when unregistering network devices.
7119 *
7120 * Any protocol or device that holds a reference should register
7121 * for netdevice notification, and cleanup and put back the
7122 * reference if they receive an UNREGISTER event.
7123 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007124 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007125 */
7126static void netdev_wait_allrefs(struct net_device *dev)
7127{
7128 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00007129 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007130
Eric Dumazete014deb2009-11-17 05:59:21 +00007131 linkwatch_forget_dev(dev);
7132
Linus Torvalds1da177e2005-04-16 15:20:36 -07007133 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00007134 refcnt = netdev_refcnt_read(dev);
7135
7136 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007137 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08007138 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007139
7140 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007141 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007142
Eric Dumazet748e2d92012-08-22 21:50:59 +00007143 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007144 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00007145 rtnl_lock();
7146
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007147 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007148 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
7149 &dev->state)) {
7150 /* We must not have linkwatch events
7151 * pending on unregister. If this
7152 * happens, we simply run the queue
7153 * unscheduled, resulting in a noop
7154 * for this device.
7155 */
7156 linkwatch_run_queue();
7157 }
7158
Stephen Hemminger6756ae42006-03-20 22:23:58 -08007159 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007160
7161 rebroadcast_time = jiffies;
7162 }
7163
7164 msleep(250);
7165
Eric Dumazet29b44332010-10-11 10:22:12 +00007166 refcnt = netdev_refcnt_read(dev);
7167
Linus Torvalds1da177e2005-04-16 15:20:36 -07007168 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007169 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
7170 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007171 warning_time = jiffies;
7172 }
7173 }
7174}
7175
7176/* The sequence is:
7177 *
7178 * rtnl_lock();
7179 * ...
7180 * register_netdevice(x1);
7181 * register_netdevice(x2);
7182 * ...
7183 * unregister_netdevice(y1);
7184 * unregister_netdevice(y2);
7185 * ...
7186 * rtnl_unlock();
7187 * free_netdev(y1);
7188 * free_netdev(y2);
7189 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07007190 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07007191 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007192 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07007193 * without deadlocking with linkwatch via keventd.
7194 * 2) Since we run with the RTNL semaphore not held, we can sleep
7195 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07007196 *
7197 * We must not return until all unregister events added during
7198 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007199 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007200void netdev_run_todo(void)
7201{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007202 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007203
Linus Torvalds1da177e2005-04-16 15:20:36 -07007204 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007205 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07007206
7207 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007208
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007209
7210 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00007211 if (!list_empty(&list))
7212 rcu_barrier();
7213
Linus Torvalds1da177e2005-04-16 15:20:36 -07007214 while (!list_empty(&list)) {
7215 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00007216 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007217 list_del(&dev->todo_list);
7218
Eric Dumazet748e2d92012-08-22 21:50:59 +00007219 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007220 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00007221 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007222
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007223 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007224 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007225 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007226 dump_stack();
7227 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007228 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007229
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007230 dev->reg_state = NETREG_UNREGISTERED;
7231
7232 netdev_wait_allrefs(dev);
7233
7234 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00007235 BUG_ON(netdev_refcnt_read(dev));
Salam Noureddine7866a622015-01-27 11:35:48 -08007236 BUG_ON(!list_empty(&dev->ptype_all));
7237 BUG_ON(!list_empty(&dev->ptype_specific));
Eric Dumazet33d480c2011-08-11 19:30:52 +00007238 WARN_ON(rcu_access_pointer(dev->ip_ptr));
7239 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07007240 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007241
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007242 if (dev->destructor)
7243 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007244
Eric W. Biederman50624c92013-09-23 21:19:49 -07007245 /* Report a network device has been unregistered */
7246 rtnl_lock();
7247 dev_net(dev)->dev_unreg_count--;
7248 __rtnl_unlock();
7249 wake_up(&netdev_unregistering_wq);
7250
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007251 /* Free network device */
7252 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007253 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007254}
7255
Ben Hutchings3cfde792010-07-09 09:11:52 +00007256/* Convert net_device_stats to rtnl_link_stats64. They have the same
7257 * fields in the same order, with only the type differing.
7258 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007259void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7260 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00007261{
7262#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007263 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
7264 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007265#else
7266 size_t i, n = sizeof(*stats64) / sizeof(u64);
7267 const unsigned long *src = (const unsigned long *)netdev_stats;
7268 u64 *dst = (u64 *)stats64;
7269
7270 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
7271 sizeof(*stats64) / sizeof(u64));
7272 for (i = 0; i < n; i++)
7273 dst[i] = src[i];
7274#endif
7275}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007276EXPORT_SYMBOL(netdev_stats_to_stats64);
Ben Hutchings3cfde792010-07-09 09:11:52 +00007277
Eric Dumazetd83345a2009-11-16 03:36:51 +00007278/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007279 * dev_get_stats - get network device statistics
7280 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07007281 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007282 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00007283 * Get network statistics from device. Return @storage.
7284 * The device driver may provide its own method by setting
7285 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
7286 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007287 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00007288struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7289 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00007290{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007291 const struct net_device_ops *ops = dev->netdev_ops;
7292
Eric Dumazet28172732010-07-07 14:58:56 -07007293 if (ops->ndo_get_stats64) {
7294 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007295 ops->ndo_get_stats64(dev, storage);
7296 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00007297 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007298 } else {
7299 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07007300 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007301 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet015f0682014-03-27 08:45:56 -07007302 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07007303 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07007304}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007305EXPORT_SYMBOL(dev_get_stats);
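/*
 * Illustrative sketch (assumption): reading aggregate byte counters. The
 * snapshot lives on the caller's stack; dev_get_stats() fills and returns
 * the storage it is handed.
 */
static u64 __maybe_unused example_total_bytes(struct net_device *dev)
{
        struct rtnl_link_stats64 stats;

        dev_get_stats(dev, &stats);
        return stats.rx_bytes + stats.tx_bytes;
}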
Rusty Russellc45d2862007-03-28 14:29:08 -07007306
Eric Dumazet24824a02010-10-02 06:11:55 +00007307struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07007308{
Eric Dumazet24824a02010-10-02 06:11:55 +00007309 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07007310
Eric Dumazet24824a02010-10-02 06:11:55 +00007311#ifdef CONFIG_NET_CLS_ACT
7312 if (queue)
7313 return queue;
7314 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7315 if (!queue)
7316 return NULL;
7317 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08007318 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
Eric Dumazet24824a02010-10-02 06:11:55 +00007319 queue->qdisc_sleeping = &noop_qdisc;
7320 rcu_assign_pointer(dev->ingress_queue, queue);
7321#endif
7322 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07007323}
7324
Eric Dumazet2c60db02012-09-16 09:17:26 +00007325static const struct ethtool_ops default_ethtool_ops;
7326
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00007327void netdev_set_default_ethtool_ops(struct net_device *dev,
7328 const struct ethtool_ops *ops)
7329{
7330 if (dev->ethtool_ops == &default_ethtool_ops)
7331 dev->ethtool_ops = ops;
7332}
7333EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
7334
Eric Dumazet74d332c2013-10-30 13:10:44 -07007335void netdev_freemem(struct net_device *dev)
7336{
7337 char *addr = (char *)dev - dev->padded;
7338
WANG Cong4cb28972014-06-02 15:55:22 -07007339 kvfree(addr);
Eric Dumazet74d332c2013-10-30 13:10:44 -07007340}
7341
Linus Torvalds1da177e2005-04-16 15:20:36 -07007342/**
Tom Herbert36909ea2011-01-09 19:36:31 +00007343 * alloc_netdev_mqs - allocate network device
Tom Gundersenc835a672014-07-14 16:37:24 +02007344 * @sizeof_priv: size of private data to allocate space for
7345 * @name: device name format string
7346 * @name_assign_type: origin of device name
7347 * @setup: callback to initialize device
7348 * @txqs: the number of TX subqueues to allocate
7349 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07007350 *
7351 * Allocates a struct net_device with private data area for driver use
Li Zhong90e51ad2013-11-22 15:04:46 +08007352 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00007353 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007354 */
Tom Herbert36909ea2011-01-09 19:36:31 +00007355struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02007356 unsigned char name_assign_type,
Tom Herbert36909ea2011-01-09 19:36:31 +00007357 void (*setup)(struct net_device *),
7358 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007359{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007360 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07007361 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007362 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007363
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07007364 BUG_ON(strlen(name) >= sizeof(dev->name));
7365
Tom Herbert36909ea2011-01-09 19:36:31 +00007366 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007367 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00007368 return NULL;
7369 }
7370
Michael Daltona953be52014-01-16 22:23:28 -08007371#ifdef CONFIG_SYSFS
Tom Herbert36909ea2011-01-09 19:36:31 +00007372 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007373 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00007374 return NULL;
7375 }
7376#endif
7377
David S. Millerfd2ea0a2008-07-17 01:56:23 -07007378 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07007379 if (sizeof_priv) {
7380 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007381 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07007382 alloc_size += sizeof_priv;
7383 }
7384 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007385 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007386
Eric Dumazet74d332c2013-10-30 13:10:44 -07007387 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
7388 if (!p)
7389 p = vzalloc(alloc_size);
Joe Perches62b59422013-02-04 16:48:16 +00007390 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007391 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007392
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007393 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007394 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007395
Eric Dumazet29b44332010-10-11 10:22:12 +00007396 dev->pcpu_refcnt = alloc_percpu(int);
7397 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07007398 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007399
Linus Torvalds1da177e2005-04-16 15:20:36 -07007400 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00007401 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007402
Jiri Pirko22bedad32010-04-01 21:22:57 +00007403 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007404 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00007405
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09007406 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007407
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07007408 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00007409 dev->gso_max_segs = GSO_MAX_SEGS;
Eric Dumazetfcbeb972014-10-05 10:11:27 -07007410 dev->gso_min_segs = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007411
Herbert Xud565b0a2008-12-15 23:38:52 -08007412 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00007413 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07007414 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00007415 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007416 INIT_LIST_HEAD(&dev->adj_list.upper);
7417 INIT_LIST_HEAD(&dev->adj_list.lower);
7418 INIT_LIST_HEAD(&dev->all_adj_list.upper);
7419 INIT_LIST_HEAD(&dev->all_adj_list.lower);
Salam Noureddine7866a622015-01-27 11:35:48 -08007420 INIT_LIST_HEAD(&dev->ptype_all);
7421 INIT_LIST_HEAD(&dev->ptype_specific);
Eric Dumazet02875872014-10-05 18:38:35 -07007422 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007423 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007424
Phil Sutter906470c2015-08-18 10:30:48 +02007425 if (!dev->tx_queue_len)
Phil Sutterf84bb1e2015-08-27 21:21:36 +02007426 dev->priv_flags |= IFF_NO_QUEUE;
Phil Sutter906470c2015-08-18 10:30:48 +02007427
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007428 dev->num_tx_queues = txqs;
7429 dev->real_num_tx_queues = txqs;
7430 if (netif_alloc_netdev_queues(dev))
7431 goto free_all;
7432
Michael Daltona953be52014-01-16 22:23:28 -08007433#ifdef CONFIG_SYSFS
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007434 dev->num_rx_queues = rxqs;
7435 dev->real_num_rx_queues = rxqs;
7436 if (netif_alloc_rx_queues(dev))
7437 goto free_all;
7438#endif
7439
Linus Torvalds1da177e2005-04-16 15:20:36 -07007440 strcpy(dev->name, name);
Tom Gundersenc835a672014-07-14 16:37:24 +02007441 dev->name_assign_type = name_assign_type;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00007442 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00007443 if (!dev->ethtool_ops)
7444 dev->ethtool_ops = &default_ethtool_ops;
Pablo Neirae687ad62015-05-13 18:19:38 +02007445
7446 nf_hook_ingress_init(dev);
7447
Linus Torvalds1da177e2005-04-16 15:20:36 -07007448 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007449
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007450free_all:
7451 free_netdev(dev);
7452 return NULL;
7453
Eric Dumazet29b44332010-10-11 10:22:12 +00007454free_pcpu:
7455 free_percpu(dev->pcpu_refcnt);
Eric Dumazet74d332c2013-10-30 13:10:44 -07007456free_dev:
7457 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007458 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007459}
Tom Herbert36909ea2011-01-09 19:36:31 +00007460EXPORT_SYMBOL(alloc_netdev_mqs);
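/*
 * Illustrative sketch (assumption): allocating a 4-TX/4-RX-queue device.
 * struct example_mq_priv and the "mq%d" template are invented for the
 * example; the private area is reached later via netdev_priv().
 */
struct example_mq_priv {
        int id;
};

static struct net_device * __maybe_unused example_alloc_mq(void)
{
        return alloc_netdev_mqs(sizeof(struct example_mq_priv), "mq%d",
                                NET_NAME_UNKNOWN, ether_setup, 4, 4);
}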
Linus Torvalds1da177e2005-04-16 15:20:36 -07007461
7462/**
7463 * free_netdev - free network device
7464 * @dev: device
7465 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007466 * This function does the last stage of destroying an allocated device
7467 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007468 * If this is the last reference then it will be freed.
Eric Dumazet93d05d42015-11-18 06:31:03 -08007469 * Must be called in process context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007470 */
7471void free_netdev(struct net_device *dev)
7472{
Herbert Xud565b0a2008-12-15 23:38:52 -08007473 struct napi_struct *p, *n;
7474
Eric Dumazet93d05d42015-11-18 06:31:03 -08007475 might_sleep();
Eric Dumazet60877a32013-06-20 01:15:51 -07007476 netif_free_tx_queues(dev);
Michael Daltona953be52014-01-16 22:23:28 -08007477#ifdef CONFIG_SYSFS
Pankaj Gupta10595902015-01-12 11:41:28 +05307478 kvfree(dev->_rx);
Tom Herbertfe822242010-11-09 10:47:38 +00007479#endif
David S. Millere8a04642008-07-17 00:34:19 -07007480
Eric Dumazet33d480c2011-08-11 19:30:52 +00007481 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00007482
Jiri Pirkof001fde2009-05-05 02:48:28 +00007483 /* Flush device addresses */
7484 dev_addr_flush(dev);
7485
Herbert Xud565b0a2008-12-15 23:38:52 -08007486 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
7487 netif_napi_del(p);
7488
Eric Dumazet29b44332010-10-11 10:22:12 +00007489 free_percpu(dev->pcpu_refcnt);
7490 dev->pcpu_refcnt = NULL;
7491
Stephen Hemminger3041a062006-05-26 13:25:24 -07007492 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007493 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07007494 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007495 return;
7496 }
7497
7498 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
7499 dev->reg_state = NETREG_RELEASED;
7500
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07007501 /* will free via device release */
7502 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007503}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007504EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007505
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007506/**
7507 * synchronize_net - Synchronize with packet receive processing
7508 *
7509 * Wait for packets currently being received to be done.
7510 * Does not block later packets from starting.
7511 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007512void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007513{
7514 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00007515 if (rtnl_is_locked())
7516 synchronize_rcu_expedited();
7517 else
7518 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007519}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007520EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007521
7522/**
Eric Dumazet44a08732009-10-27 07:03:04 +00007523 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07007524 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00007525 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08007526 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007527 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08007528 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00007529 * If head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007530 *
7531 * Callers must hold the rtnl semaphore. You may want
7532 * unregister_netdev() instead of this.
7533 */
7534
Eric Dumazet44a08732009-10-27 07:03:04 +00007535void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007536{
Herbert Xua6620712007-12-12 19:21:56 -08007537 ASSERT_RTNL();
7538
Eric Dumazet44a08732009-10-27 07:03:04 +00007539 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00007540 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00007541 } else {
7542 rollback_registered(dev);
7543 /* Finish processing unregister after unlock */
7544 net_set_todo(dev);
7545 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007546}
Eric Dumazet44a08732009-10-27 07:03:04 +00007547EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007548
7549/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007550 * unregister_netdevice_many - unregister many devices
7551 * @head: list of devices
Eric Dumazet87757a92014-06-06 06:44:03 -07007552 *
 7553 * Note: As most callers use a stack-allocated list_head,
 7554 * we force a list_del() to make sure the stack won't be corrupted later.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007555 */
7556void unregister_netdevice_many(struct list_head *head)
7557{
7558 struct net_device *dev;
7559
7560 if (!list_empty(head)) {
7561 rollback_registered_many(head);
7562 list_for_each_entry(dev, head, unreg_list)
7563 net_set_todo(dev);
Eric Dumazet87757a92014-06-06 06:44:03 -07007564 list_del(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007565 }
7566}
Eric Dumazet63c80992009-10-27 07:06:49 +00007567EXPORT_SYMBOL(unregister_netdevice_many);
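/*
 * Illustrative sketch (assumption): batching two teardowns on one stack
 * list so the synchronize_net() calls in rollback_registered_many() run
 * once for the whole batch instead of once per device.
 */
static void __maybe_unused example_unregister_pair(struct net_device *a,
                                                   struct net_device *b)
{
        LIST_HEAD(kill_list);   /* stack-allocated, see the note above */

        ASSERT_RTNL();
        unregister_netdevice_queue(a, &kill_list);
        unregister_netdevice_queue(b, &kill_list);
        unregister_netdevice_many(&kill_list);  /* also does the list_del() */
}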
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007568
7569/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007570 * unregister_netdev - remove device from the kernel
7571 * @dev: device
7572 *
7573 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08007574 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007575 *
7576 * This is just a wrapper for unregister_netdevice that takes
7577 * the rtnl semaphore. In general you want to use this and not
7578 * unregister_netdevice.
7579 */
7580void unregister_netdev(struct net_device *dev)
7581{
7582 rtnl_lock();
7583 unregister_netdevice(dev);
7584 rtnl_unlock();
7585}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007586EXPORT_SYMBOL(unregister_netdev);
7587
Eric W. Biedermance286d32007-09-12 13:53:49 +02007588/**
7589 * dev_change_net_namespace - move device to a different network namespace
7590 * @dev: device
7591 * @net: network namespace
7592 * @pat: If not NULL, name pattern to try if the current device name
7593 * is already taken in the destination network namespace.
7594 *
7595 * This function shuts down a device interface and moves it
7596 * to a new network namespace. On success 0 is returned, on
7597 * a failure a negative errno code is returned.
7598 *
7599 * Callers must hold the rtnl semaphore.
7600 */
7601
7602int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7603{
Eric W. Biedermance286d32007-09-12 13:53:49 +02007604 int err;
7605
7606 ASSERT_RTNL();
7607
7608 /* Don't allow namespace local devices to be moved. */
7609 err = -EINVAL;
7610 if (dev->features & NETIF_F_NETNS_LOCAL)
7611 goto out;
7612
7613 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02007614 if (dev->reg_state != NETREG_REGISTERED)
7615 goto out;
7616
7617 /* Get out if there is nothing to do */
7618 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09007619 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02007620 goto out;
7621
7622 /* Pick the destination device name, and ensure
7623 * we can use it in the destination network namespace.
7624 */
7625 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00007626 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007627 /* We get here if we can't use the current device name */
7628 if (!pat)
7629 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00007630 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007631 goto out;
7632 }
7633
7634 /*
7635 * And now a mini version of register_netdevice() and unregister_netdevice().
7636 */
7637
7638 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07007639 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007640
7641 /* And unlink it from device chain */
7642 err = -ENODEV;
7643 unlist_netdevice(dev);
7644
7645 synchronize_net();
7646
7647 /* Shutdown queueing discipline. */
7648 dev_shutdown(dev);
7649
7650 /* Notify protocols that we are about to destroy
7651 this device. They should clean up all their state.
David Lamparter3b27e102010-09-17 03:22:19 +00007652
7653 Note that dev->reg_state stays at NETREG_REGISTERED.
7654 This is wanted because this way 8021q and macvlan know
7655 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02007656 */
7657 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00007658 rcu_barrier();
7659 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007660 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007661
7662 /*
7663 * Flush the unicast and multicast chains
7664 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007665 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00007666 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007667
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007668 /* Send a netdev-removed uevent to the old namespace */
7669 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007670 netdev_adjacent_del_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007671
Eric W. Biedermance286d32007-09-12 13:53:49 +02007672 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09007673 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007674
Eric W. Biedermance286d32007-09-12 13:53:49 +02007675 /* If there is an ifindex conflict assign a new one */
Nicolas Dichtel7a66bbc2015-04-02 17:07:09 +02007676 if (__dev_get_by_index(net, dev->ifindex))
Eric W. Biedermance286d32007-09-12 13:53:49 +02007677 dev->ifindex = dev_new_index(net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007678
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007679 /* Send a netdev-add uevent to the new namespace */
7680 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007681 netdev_adjacent_add_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007682
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007683 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07007684 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007685 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007686
7687 /* Add the device back in the hashes */
7688 list_netdevice(dev);
7689
7690 /* Notify protocols that a new device appeared. */
7691 call_netdevice_notifiers(NETDEV_REGISTER, dev);
7692
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007693 /*
7694 * Prevent userspace races by waiting until the network
7695 * device is fully setup before sending notifications.
7696 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007697 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007698
Eric W. Biedermance286d32007-09-12 13:53:49 +02007699 synchronize_net();
7700 err = 0;
7701out:
7702 return err;
7703}
Johannes Berg463d0182009-07-14 00:33:35 +02007704EXPORT_SYMBOL_GPL(dev_change_net_namespace);
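
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * This is roughly how rtnetlink's IFLA_NET_NS_FD handling drives
 * dev_change_net_namespace(), using the real get_net_ns_by_fd()
 * helper; my_move_dev() and its arguments are hypothetical, and error
 * handling is trimmed.
 */
#if 0	/* example only */
static int my_move_dev(struct net_device *dev, int netns_fd)
{
	struct net *net = get_net_ns_by_fd(netns_fd);
	int err;

	if (IS_ERR(net))
		return PTR_ERR(net);

	rtnl_lock();
	/* Fall back to an eth%d name if dev->name is taken in @net. */
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();

	put_net(net);
	return err;
}
#endif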
Eric W. Biedermance286d32007-09-12 13:53:49 +02007705
Linus Torvalds1da177e2005-04-16 15:20:36 -07007706static int dev_cpu_callback(struct notifier_block *nfb,
7707 unsigned long action,
7708 void *ocpu)
7709{
7710 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007711 struct sk_buff *skb;
7712 unsigned int cpu, oldcpu = (unsigned long)ocpu;
7713 struct softnet_data *sd, *oldsd;
7714
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007715 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007716 return NOTIFY_OK;
7717
7718 local_irq_disable();
7719 cpu = smp_processor_id();
7720 sd = &per_cpu(softnet_data, cpu);
7721 oldsd = &per_cpu(softnet_data, oldcpu);
7722
7723 /* Find end of our completion_queue. */
7724 list_skb = &sd->completion_queue;
7725 while (*list_skb)
7726 list_skb = &(*list_skb)->next;
7727 /* Append completion queue from offline CPU. */
7728 *list_skb = oldsd->completion_queue;
7729 oldsd->completion_queue = NULL;
7730
Linus Torvalds1da177e2005-04-16 15:20:36 -07007731 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00007732 if (oldsd->output_queue) {
7733 *sd->output_queue_tailp = oldsd->output_queue;
7734 sd->output_queue_tailp = oldsd->output_queue_tailp;
7735 oldsd->output_queue = NULL;
7736 oldsd->output_queue_tailp = &oldsd->output_queue;
7737 }
Eric Dumazetac64da02015-01-15 17:04:22 -08007738 /* Append NAPI poll list from offline CPU, with one exception:
7739 * process_backlog() must be called by the CPU that owns the per-cpu backlog.
7740 * We properly handle process_queue & input_pkt_queue later.
7741 */
7742 while (!list_empty(&oldsd->poll_list)) {
7743 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7744 struct napi_struct,
7745 poll_list);
7746
7747 list_del_init(&napi->poll_list);
7748 if (napi->poll == process_backlog)
7749 napi->state = 0;
7750 else
7751 ____napi_schedule(sd, napi);
Heiko Carstens264524d2011-06-06 20:50:03 +00007752 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007753
7754 raise_softirq_irqoff(NET_TX_SOFTIRQ);
7755 local_irq_enable();
7756
7757 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00007758 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08007759 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00007760 input_queue_head_incr(oldsd);
7761 }
Eric Dumazetac64da02015-01-15 17:04:22 -08007762 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08007763 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00007764 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07007765 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007766
7767 return NOTIFY_OK;
7768}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007769
7770
Herbert Xu7f353bf2007-08-10 15:47:58 -07007771/**
Herbert Xub63365a2008-10-23 01:11:29 -07007772 * netdev_increment_features - increment feature set by one
7773 * @all: current feature set
7774 * @one: new feature set
7775 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07007776 *
7777 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07007778 * @one to the master device with current feature set @all. Will not
7779 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07007780 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007781netdev_features_t netdev_increment_features(netdev_features_t all,
7782 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07007783{
Tom Herbertc8cd0982015-12-14 11:19:44 -08007784 if (mask & NETIF_F_HW_CSUM)
Tom Herberta1882222015-12-14 11:19:43 -08007785 mask |= NETIF_F_CSUM_MASK;
Michał Mirosław1742f182011-04-22 06:31:16 +00007786 mask |= NETIF_F_VLAN_CHALLENGED;
7787
Tom Herberta1882222015-12-14 11:19:43 -08007788 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
Michał Mirosław1742f182011-04-22 06:31:16 +00007789 all &= one | ~NETIF_F_ALL_FOR_ALL;
7790
Michał Mirosław1742f182011-04-22 06:31:16 +00007791 /* If one device supports hw checksumming, set for all. */
Tom Herbertc8cd0982015-12-14 11:19:44 -08007792 if (all & NETIF_F_HW_CSUM)
7793 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07007794
7795 return all;
7796}
Herbert Xub63365a2008-10-23 01:11:29 -07007797EXPORT_SYMBOL(netdev_increment_features);
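
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * An aggregating master (bond/team-style) might recompute its feature
 * set with netdev_increment_features() as lower devices come and go,
 * along the lines of what the bonding driver does.  MY_MASTER_FEATURES
 * and my_master_compute_features() are hypothetical.
 */
#if 0	/* example only */
#define MY_MASTER_FEATURES	(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_ALL_TSO)

static void my_master_compute_features(struct net_device *master)
{
	netdev_features_t features = MY_MASTER_FEATURES;
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(master, lower, iter)
		features = netdev_increment_features(features,
						     lower->features,
						     MY_MASTER_FEATURES);
	master->features = features;
}
#endif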
Herbert Xu7f353bf2007-08-10 15:47:58 -07007798
Baruch Siach430f03c2013-06-02 20:43:55 +00007799static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007800{
7801 int i;
7802 struct hlist_head *hash;
7803
7804 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7805 if (hash != NULL)
7806 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7807 INIT_HLIST_HEAD(&hash[i]);
7808
7809 return hash;
7810}
7811
Eric W. Biederman881d9662007-09-17 11:56:21 -07007812/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07007813static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007814{
Rustad, Mark D734b6542012-07-18 09:06:07 +00007815 if (net != &init_net)
7816 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07007817
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007818 net->dev_name_head = netdev_create_hash();
7819 if (net->dev_name_head == NULL)
7820 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007821
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007822 net->dev_index_head = netdev_create_hash();
7823 if (net->dev_index_head == NULL)
7824 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007825
7826 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007827
7828err_idx:
7829 kfree(net->dev_name_head);
7830err_name:
7831 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007832}
7833
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007834/**
7835 * netdev_drivername - network driver for the device
7836 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007837 *
7838 * Determine network driver for device.
7839 */
David S. Miller3019de12011-06-06 16:41:33 -07007840const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07007841{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07007842 const struct device_driver *driver;
7843 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07007844 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07007845
7846 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007847 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07007848 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007849
7850 driver = parent->driver;
7851 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07007852 return driver->name;
7853 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007854}
7855
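
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * netdev_drivername() is meant for diagnostics such as the
 * tx-watchdog's stall report; my_report_stall() is hypothetical.
 */
#if 0	/* example only */
static void my_report_stall(struct net_device *dev)
{
	WARN_ONCE(1, "NETDEV WATCHDOG: %s (%s): transmit queue timed out\n",
		  dev->name, netdev_drivername(dev));
}
#endif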
Joe Perches6ea754e2014-09-22 11:10:50 -07007856static void __netdev_printk(const char *level, const struct net_device *dev,
7857 struct va_format *vaf)
Joe Perches256df2f2010-06-27 01:02:35 +00007858{
Joe Perchesb004ff42012-09-12 20:12:19 -07007859 if (dev && dev->dev.parent) {
Joe Perches6ea754e2014-09-22 11:10:50 -07007860 dev_printk_emit(level[1] - '0',
7861 dev->dev.parent,
7862 "%s %s %s%s: %pV",
7863 dev_driver_string(dev->dev.parent),
7864 dev_name(dev->dev.parent),
7865 netdev_name(dev), netdev_reg_state(dev),
7866 vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007867 } else if (dev) {
Joe Perches6ea754e2014-09-22 11:10:50 -07007868 printk("%s%s%s: %pV",
7869 level, netdev_name(dev), netdev_reg_state(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007870 } else {
Joe Perches6ea754e2014-09-22 11:10:50 -07007871 printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007872 }
Joe Perches256df2f2010-06-27 01:02:35 +00007873}
7874
Joe Perches6ea754e2014-09-22 11:10:50 -07007875void netdev_printk(const char *level, const struct net_device *dev,
7876 const char *format, ...)
Joe Perches256df2f2010-06-27 01:02:35 +00007877{
7878 struct va_format vaf;
7879 va_list args;
Joe Perches256df2f2010-06-27 01:02:35 +00007880
7881 va_start(args, format);
7882
7883 vaf.fmt = format;
7884 vaf.va = &args;
7885
Joe Perches6ea754e2014-09-22 11:10:50 -07007886 __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007887
Joe Perches256df2f2010-06-27 01:02:35 +00007888 va_end(args);
Joe Perches256df2f2010-06-27 01:02:35 +00007889}
7890EXPORT_SYMBOL(netdev_printk);
7891
7892#define define_netdev_printk_level(func, level) \
Joe Perches6ea754e2014-09-22 11:10:50 -07007893void func(const struct net_device *dev, const char *fmt, ...) \
Joe Perches256df2f2010-06-27 01:02:35 +00007894{ \
Joe Perches256df2f2010-06-27 01:02:35 +00007895 struct va_format vaf; \
7896 va_list args; \
7897 \
7898 va_start(args, fmt); \
7899 \
7900 vaf.fmt = fmt; \
7901 vaf.va = &args; \
7902 \
Joe Perches6ea754e2014-09-22 11:10:50 -07007903 __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07007904 \
Joe Perches256df2f2010-06-27 01:02:35 +00007905 va_end(args); \
Joe Perches256df2f2010-06-27 01:02:35 +00007906} \
7907EXPORT_SYMBOL(func);
7908
7909define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7910define_netdev_printk_level(netdev_alert, KERN_ALERT);
7911define_netdev_printk_level(netdev_crit, KERN_CRIT);
7912define_netdev_printk_level(netdev_err, KERN_ERR);
7913define_netdev_printk_level(netdev_warn, KERN_WARNING);
7914define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7915define_netdev_printk_level(netdev_info, KERN_INFO);
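
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * The helpers generated above are used like dev_err()/dev_info(), but
 * prefix each message with the driver, bus and interface names;
 * my_open() is hypothetical.
 */
#if 0	/* example only */
static int my_open(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_warn(dev, "link is down at open time\n");

	netdev_info(dev, "opened, mtu %u\n", dev->mtu);
	return 0;
}
#endif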
7916
Pavel Emelyanov46650792007-10-08 20:38:39 -07007917static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007918{
7919 kfree(net->dev_name_head);
7920 kfree(net->dev_index_head);
7921}
7922
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007923static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07007924 .init = netdev_init,
7925 .exit = netdev_exit,
7926};
7927
Pavel Emelyanov46650792007-10-08 20:38:39 -07007928static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007929{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007930 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02007931 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007932 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02007933 * initial network namespace
7934 */
7935 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007936 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007937 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007938 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02007939
7940 /* Ignore unmovable devices (e.g. the loopback device) */
7941 if (dev->features & NETIF_F_NETNS_LOCAL)
7942 continue;
7943
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007944 /* Leave virtual devices for the generic cleanup */
7945 if (dev->rtnl_link_ops)
7946 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08007947
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007948 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007949 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7950 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007951 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007952 pr_emerg("%s: failed to move %s to init_net: %d\n",
7953 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007954 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02007955 }
7956 }
7957 rtnl_unlock();
7958}
7959
Eric W. Biederman50624c92013-09-23 21:19:49 -07007960static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7961{
7962 /* Return with the rtnl_lock held when there are no network
7963 * devices unregistering in any network namespace in net_list.
7964 */
7965 struct net *net;
7966 bool unregistering;
Peter Zijlstraff960a72014-10-29 17:04:56 +01007967 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007968
Peter Zijlstraff960a72014-10-29 17:04:56 +01007969 add_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007970 for (;;) {
Eric W. Biederman50624c92013-09-23 21:19:49 -07007971 unregistering = false;
7972 rtnl_lock();
7973 list_for_each_entry(net, net_list, exit_list) {
7974 if (net->dev_unreg_count > 0) {
7975 unregistering = true;
7976 break;
7977 }
7978 }
7979 if (!unregistering)
7980 break;
7981 __rtnl_unlock();
Peter Zijlstraff960a72014-10-29 17:04:56 +01007982
7983 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007984 }
Peter Zijlstraff960a72014-10-29 17:04:56 +01007985 remove_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007986}
7987
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007988static void __net_exit default_device_exit_batch(struct list_head *net_list)
7989{
7990 /* At exit all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04007991 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007992 * Do this across as many network namespaces as possible to
7993 * improve batching efficiency.
7994 */
7995 struct net_device *dev;
7996 struct net *net;
7997 LIST_HEAD(dev_kill_list);
7998
Eric W. Biederman50624c92013-09-23 21:19:49 -07007999 /* To prevent network device cleanup code from dereferencing
8000 * loopback devices or network devices that have been freed
8001 * wait here for all pending unregistrations to complete,
8002 * before unregistring the loopback device and allowing the
8003 * network namespace be freed.
8004 *
8005 * The netdev todo list containing all network devices
8006 * unregistrations that happen in default_device_exit_batch
8007 * will run in the rtnl_unlock() at the end of
8008 * default_device_exit_batch.
8009 */
8010 rtnl_lock_unregistering(net_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008011 list_for_each_entry(net, net_list, exit_list) {
8012 for_each_netdev_reverse(net, dev) {
Jiri Pirkob0ab2fa2014-06-26 09:58:25 +02008013 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008014 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
8015 else
8016 unregister_netdevice_queue(dev, &dev_kill_list);
8017 }
8018 }
8019 unregister_netdevice_many(&dev_kill_list);
8020 rtnl_unlock();
8021}
8022
Denis V. Lunev022cbae2007-11-13 03:23:50 -08008023static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008024 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008025 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02008026};
8027
Linus Torvalds1da177e2005-04-16 15:20:36 -07008028/*
8029 * Initialize the DEV module. At boot time this walks the device list and
8030 * unhooks any devices that fail to initialise (normally hardware not
8031 * present) and leaves us with a valid list of present and active devices.
8032 *
8033 */
8034
8035/*
8036 * This is called single threaded during boot, so no need
8037 * to take the rtnl semaphore.
8038 */
8039static int __init net_dev_init(void)
8040{
8041 int i, rc = -ENOMEM;
8042
8043 BUG_ON(!dev_boot_phase);
8044
Linus Torvalds1da177e2005-04-16 15:20:36 -07008045 if (dev_proc_init())
8046 goto out;
8047
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008048 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07008049 goto out;
8050
8051 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08008052 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008053 INIT_LIST_HEAD(&ptype_base[i]);
8054
Vlad Yasevich62532da2012-11-15 08:49:10 +00008055 INIT_LIST_HEAD(&offload_base);
8056
Eric W. Biederman881d9662007-09-17 11:56:21 -07008057 if (register_pernet_subsys(&netdev_net_ops))
8058 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008059
8060 /*
8061 * Initialise the packet receive queues.
8062 */
8063
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07008064 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008065 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008066
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008067 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07008068 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008069 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00008070 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00008071#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008072 sd->csd.func = rps_trigger_softirq;
8073 sd->csd.info = sd;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008074 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07008075#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00008076
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008077 sd->backlog.poll = process_backlog;
8078 sd->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008079 }
8080
Linus Torvalds1da177e2005-04-16 15:20:36 -07008081 dev_boot_phase = 0;
8082
Eric W. Biederman505d4f72008-11-07 22:54:20 -08008083 /* The loopback device is special: if any other network device
8084 * is present in a network namespace, the loopback device must
8085 * be present too. Since we now dynamically allocate and free
8086 * the loopback device, ensure this invariant is maintained by
8087 * keeping the loopback device as the first device on the
8088 * list of network devices, ensuring that the loopback device
8089 * is the first device that appears and the last network device
8090 * that disappears.
8091 */
8092 if (register_pernet_device(&loopback_net_ops))
8093 goto out;
8094
8095 if (register_pernet_device(&default_device_ops))
8096 goto out;
8097
Carlos R. Mafra962cf362008-05-15 11:15:37 -03008098 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
8099 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008100
8101 hotcpu_notifier(dev_cpu_callback, 0);
Thomas Graff38a9eb2015-07-21 10:43:56 +02008102 dst_subsys_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008103 rc = 0;
8104out:
8105 return rc;
8106}
8107
8108subsys_initcall(net_dev_init);