/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *              Thomas Bogendoerfer :   Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *              Michael Chastain:       Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell :    SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;       /* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
                                         struct net_device *dev,
                                         struct netdev_notifier_info *info);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
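
/*
 * Reader-side sketch of the scheme described above (illustrative only, not
 * part of the original file; the helper name is hypothetical). A pure
 * reader either takes dev_base_lock for reading, as here, or walks the
 * list under RCU using the _rcu iterators:
 *
 *      static void example_dump_names(struct net *net)
 *      {
 *              struct net_device *dev;
 *
 *              read_lock(&dev_base_lock);
 *              for_each_netdev(net, dev)
 *                      pr_info("%s\n", dev->name);
 *              read_unlock(&dev_base_lock);
 *      }
 */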

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
        while (++net->dev_base_seq == 0)
                ;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        hlist_del_rcu(&dev->name_hlist);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(dev_net(dev));
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
        ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
        ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
        ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
        ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
        ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
        ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
        ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
        ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
        ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
        ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
        ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
        ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
        ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
        ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
        ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
        "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
        "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
        "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
        "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
        "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
        "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
        "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
        "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
        "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
        "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
        "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
        "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
        "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
        "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
        "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *              Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers, mangling input packets,
 *      MUST BE last in hash buckets and checking protocol handlers
 *      MUST start from promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if a protocol handler that mangles packets
 *      were first on the list, it could not sense that the packet
 *      is cloned and should be copied-on-write; it would change the
 *      packet and subsequent readers would see it broken.
 *                                              --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
        if (pt->type == htons(ETH_P_ALL))
                return pt->dev ? &pt->dev->ptype_all : &ptype_all;
        else
                return pt->dev ? &pt->dev->ptype_specific :
                                 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 * dev_add_pack - add packet handler
 * @pt: packet type declaration
 *
 * Add a protocol handler to the networking stack. The passed &packet_type
 * is linked into kernel lists and may not be freed until it has been
 * removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that CPUs
 * that are in the middle of receiving packets will see the new
 * packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);

        spin_lock(&ptype_lock);
        list_add_rcu(&pt->list, head);
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
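
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * handler and variable names are hypothetical). A tap that wants to see
 * every protocol registers an ETH_P_ALL handler; the handler receives its
 * own reference to the skb and must free it:
 *
 *      static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *                             struct packet_type *pt,
 *                             struct net_device *orig_dev)
 *      {
 *              // inspect skb here
 *              kfree_skb(skb);
 *              return NET_RX_SUCCESS;
 *      }
 *
 *      static struct packet_type example_pt __read_mostly = {
 *              .type = htons(ETH_P_ALL),
 *              .func = example_rcv,
 *      };
 *
 *      dev_add_pack(&example_pt);      // typically from module init
 *      dev_remove_pack(&example_pt);   // from module exit
 */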

/**
 * __dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;

        spin_lock(&ptype_lock);

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_pack: %p not found\n", pt);
out:
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 * dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &proto_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that CPUs
 * that are in the middle of receiving packets will see the new
 * offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct packet_offload *elem;

        spin_lock(&offload_lock);
        list_for_each_entry(elem, &offload_base, list) {
                if (po->priority < elem->priority)
                        break;
        }
        list_add_rcu(&po->list, elem->list.prev);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
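
/*
 * Registration sketch (illustrative only; the callback names are
 * hypothetical, the shape is modelled on the IPv4 offload registered in
 * net/ipv4/af_inet.c). A lower .priority value places the entry earlier
 * in offload_base:
 *
 *      static struct packet_offload example_offload __read_mostly = {
 *              .type = cpu_to_be16(ETH_P_IP),
 *              .callbacks = {
 *                      .gso_segment = example_gso_segment,
 *                      .gro_receive = example_gro_receive,
 *                      .gro_complete = example_gro_complete,
 *              },
 *      };
 *
 *      dev_add_offload(&example_offload);
 */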

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &offload_type
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &offload_type is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************
 *
 *                    Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 * netdev_boot_setup_add - add new setup entry
 * @name: name of the device
 * @map: configured settings for the device
 *
 * Adds a new setup entry to the dev_boot_setup list.  The function
 * returns 0 on error and 1 on success.  This is a generic routine for
 * all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 * netdev_boot_setup_check - check boot time settings
 * @dev: the netdevice
 *
 * Check boot time settings for the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq = s[i].map.irq;
                        dev->base_addr = s[i].map.base_addr;
                        dev->mem_start = s[i].map.mem_start;
                        dev->mem_end = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 * netdev_boot_base - get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
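
/*
 * Illustrative command line (the values are made up): booting with
 *
 *      netdev=5,0x340,0xd0000,0xd4000,eth0
 *
 * records irq=5, base_addr=0x340, mem_start=0xd0000 and mem_end=0xd4000
 * for the device that later registers as "eth0"; integers that are left
 * out simply stay zero in the saved ifmap.
 */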

/*******************************************************************************
 *
 *                          Device Interface Subroutines
 *
 *******************************************************************************/

/**
 * dev_get_iflink - get 'iflink' value of an interface
 * @dev: targeted interface
 *
 * Indicates the ifindex the interface is linked to.
 * Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
        if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
                return dev->netdev_ops->ndo_get_iflink(dev);

        return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 * dev_fill_metadata_dst - Retrieve tunnel egress information.
 * @dev: targeted interface
 * @skb: The packet.
 *
 * For better visibility of tunnel traffic OVS needs to retrieve
 * egress tunnel information for a packet. The following API allows
 * a user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
        struct ip_tunnel_info *info;

        if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
                return -EINVAL;

        info = skb_tunnel_info_unclone(skb);
        if (!info)
                return -ENOMEM;
        if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
                return -EINVAL;

        return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 * __dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. Must be called under the RTNL semaphore
 * or @dev_base_lock. If the name is found a pointer to the device
 * is returned. If the name is not found then %NULL is returned. The
 * reference counters are not incremented so the caller must be
 * careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry_rcu(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 * dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. This can be called from any
 * context and does its own locking. The returned handle has
 * the usage count incremented and the caller must use dev_put() to
 * release it when it is no longer needed. %NULL is returned if no
 * matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
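
/*
 * Caller sketch (illustrative only, not part of the original file): the
 * reference obtained here must be dropped with dev_put() when the caller
 * is done with the device.
 *
 *      struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *      if (dev) {
 *              pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);
 *              dev_put(dev);
 *      }
 */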

/**
 * __dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if it is not found. The device has not had its reference
 * counter increased so the caller must be careful about locking. The
 * caller must hold either the RTNL semaphore or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 * dev_get_by_index_rcu - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if it is not found. The device has not had its reference
 * counter increased so the caller must be careful about locking. The
 * caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
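
/*
 * Caller sketch (illustrative only): the device is only guaranteed to stay
 * alive for the duration of the RCU read-side section, so finish with it
 * before rcu_read_unlock(), or take a reference via dev_hold() while
 * still inside the section.
 *
 *      rcu_read_lock();
 *      dev = dev_get_by_index_rcu(net, ifindex);
 *      if (dev)
 *              mtu = dev->mtu;
 *      rcu_read_unlock();
 */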


/**
 * dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if it is not found. The device returned has had a reference
 * added and the pointer is safe until the user calls dev_put() to
 * indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 * netdev_get_name - get a netdevice name, knowing its ifindex.
 * @net: network namespace
 * @name: a pointer to the buffer where the name will be stored.
 * @ifindex: the ifindex of the interface to get the name from.
 *
 * The use of raw_seqcount_begin() and cond_resched() before
 * retrying is required as we want to give the writers a chance
 * to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
        struct net_device *dev;
        unsigned int seq;

retry:
        seq = raw_seqcount_begin(&devnet_rename_seq);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                rcu_read_unlock();
                return -ENODEV;
        }

        strcpy(name, dev->name);
        rcu_read_unlock();
        if (read_seqcount_retry(&devnet_rename_seq, seq)) {
                cond_resched();
                goto retry;
        }

        return 0;
}

/**
 * dev_getbyhwaddr_rcu - find a device by its hardware address
 * @net: the applicable net namespace
 * @type: media type of device
 * @ha: hardware address
 *
 * Search for an interface by MAC address. Returns a pointer to the
 * device, or %NULL if it is not found.
 * The caller must hold RCU or RTNL.
 * The returned device has not had its ref count increased
 * and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
                                       const char *ha)
{
        struct net_device *dev;

        for_each_netdev_rcu(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
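
/*
 * Caller sketch (illustrative only; the address is made up), looking up an
 * Ethernet device by MAC address under RCU:
 *
 *      static const unsigned char addr[ETH_ALEN] = {
 *              0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *      struct net_device *dev;
 *
 *      rcu_read_lock();
 *      dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, addr);
 *      if (dev)
 *              pr_info("found %s\n", dev->name);
 *      rcu_read_unlock();
 */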

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev, *ret = NULL;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                if (dev->type == type) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 * __dev_get_by_flags - find any device with given flags
 * @net: the applicable net namespace
 * @if_flags: IFF_* values
 * @mask: bitmask of bits in if_flags to check
 *
 * Search for any interface with the given flags. Returns a pointer to
 * the first matching device, or %NULL if none is found. Must be called
 * inside rtnl_lock(), and the result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
                                      unsigned short mask)
{
        struct net_device *dev, *ret;

        ASSERT_RTNL();

        ret = NULL;
        for_each_netdev(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        ret = dev;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 * dev_valid_name - check if name is okay for network device
 * @name: name string
 *
 * Network device names need to be valid file names
 * to allow sysfs to work. We also disallow any kind of
 * whitespace.
 */
bool dev_valid_name(const char *name)
{
        if (*name == '\0')
                return false;
        if (strlen(name) >= IFNAMSIZ)
                return false;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return false;

        while (*name) {
                if (*name == '/' || *name == ':' || isspace(*name))
                        return false;
                name++;
        }
        return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 * __dev_alloc_name - allocate a name for a device
 * @net: network namespace to allocate the device name in
 * @name: name format string
 * @buf:  scratch buffer and result name string
 *
 * Passed a format string - eg "lt%d" - it will try and find a suitable
 * id. It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        if (buf != name)
                snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

/**
 * dev_alloc_name - allocate a name for a device
 * @dev: device
 * @name: name format string
 *
 * Passed a format string - eg "lt%d" - it will try and find a suitable
 * id. It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
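
/*
 * Driver-side sketch (illustrative only): pick the first free "eth%d"
 * unit for a freshly allocated netdev, then register it while still
 * holding the rtnl lock so no duplicate name can sneak in between.
 *
 *      rtnl_lock();
 *      err = dev_alloc_name(dev, "eth%d");     // e.g. becomes "eth2"
 *      if (err >= 0)
 *              err = register_netdevice(dev);
 *      rtnl_unlock();
 */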

static int dev_alloc_name_ns(struct net *net,
                             struct net_device *dev,
                             const char *name)
{
        char buf[IFNAMSIZ];
        int ret;

        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}

static int dev_get_valid_name(struct net *net,
                              struct net_device *dev,
                              const char *name)
{
        BUG_ON(!net);

        if (!dev_valid_name(name))
                return -EINVAL;

        if (strchr(name, '%'))
                return dev_alloc_name_ns(net, dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (dev->name != name)
                strlcpy(dev->name, name, IFNAMSIZ);

        return 0;
}

1139/**
1140 * dev_change_name - change name of a device
1141 * @dev: device
1142 * @newname: name (or format string) must be at least IFNAMSIZ
1143 *
 * Change the name of a device. A format string such as "eth%d" may be
 * passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
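
/*
 * Illustrative usage (a sketch, not part of the original source): a caller
 * renaming a device must hold RTNL; the name "mydev%d" is hypothetical and
 * shows the wildcard form handled above.
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "mydev%d");
 *	rtnl_unlock();
 *	if (err < 0)
 *		pr_warn("rename failed: %d\n", err);
 */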

/**
 * dev_set_alias - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from @alias
 *
 * Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len + 1);
	return len;
}
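
/*
 * Usage sketch (illustrative, not from the original source): note that on
 * success the return value is the alias length, not zero, and that a zero
 * length clears the alias.
 *
 *	ASSERT_RTNL();
 *	ret = dev_set_alias(dev, "uplink-to-core", 14);	(ret == 14 on success)
 *	ret = dev_set_alias(dev, NULL, 0);		(clears the alias)
 */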

/**
 * netdev_features_change - device changes features
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 * netdev_state_change - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
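
/*
 * Sketch (an assumed call site, not in the original file): a virtualization
 * or failover driver might call this once a migration completes so that
 * switches relearn the MAC address. Unlike most netdev helpers it takes
 * RTNL itself, so the caller must not already hold it.
 *
 *	if (demo_migration_finished)		(hypothetical condition)
 *		netdev_notify_peers(dev);
 */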

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
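
/*
 * Illustrative caller (a sketch, not from the original source): bringing a
 * device up from kernel code mirrors what "ip link set dev X up" triggers,
 * and must run under RTNL because dev_open() ends in notifier calls:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */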

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list,
		 * it can even be on a different cpu. So just clear
		 * netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device specific close. This cannot fail.
		 * Only if device is UP
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}

int dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}

	return 0;
}
EXPORT_SYMBOL(dev_close_many);

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);
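
/*
 * Sketch (assumed usage, not in the original file): closing several devices
 * at once batches them on a local list, as dev_close() itself does for one
 * device, so the expensive synchronization in __dev_close_many() runs once:
 *
 *	LIST_HEAD(close_head);
 *
 *	list_add(&dev_a->close_list, &close_head);
 *	list_add(&dev_b->close_list, &close_head);
 *	dev_close_many(&close_head, true);
 */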

/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device. Must be
 * called under RTNL. This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);
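
/*
 * Illustrative call site (an assumption, not from the original source):
 * forwarding setups such as enslaving a port to a bridge disable LRO first,
 * since merged super-packets must not be forwarded; the recursion above
 * covers stacked lower devices automatically.
 *
 *	ASSERT_RTNL();
 *	dev_disable_lro(port_dev);
 */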

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed to the
 * new notifier so that it gets a race-free view of the network
 * device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
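
/*
 * Minimal notifier sketch (hypothetical, for illustration only): the
 * callback receives a struct netdev_notifier_info pointer, from which the
 * device is recovered with netdev_notifier_info_to_dev():
 *
 *	static int demo_netdev_event(struct notifier_block *nb,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block demo_nb = {
 *		.notifier_call = demo_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&demo_nb);
 */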

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier, removing
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 * call_netdevice_notifiers_info - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 * @info: notifier information data
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static struct static_key egress_needed __read_mostly;

void net_inc_egress_queue(void)
{
	static_key_slow_inc(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_key_slow_dec(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_key_enable(&netstamp_needed);
	else
		static_key_disable(&netstamp_needed);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_inc(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_dec(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
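
/*
 * Usage note (a sketch of assumed callers, not from the original source):
 * enable/disable calls must stay balanced, since they maintain a reference
 * count behind the static key. Timestamp-requesting sockets effectively do:
 *
 *	net_enable_timestamp();		(first timestamp option enabled)
 *	...
 *	net_disable_timestamp();	(socket destroyed)
 */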

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp)			\
			__net_timestamp(SKB);			\
	}							\

bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
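
/*
 * Illustrative check (an assumption, not in the original file): bridge-like
 * forwarding code drops frames the target cannot take before handing them
 * to another port:
 *
 *	if (!is_skb_forwardable(target_dev, skb)) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 */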

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
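
/*
 * Illustrative use (a sketch under assumptions, not the original driver
 * code): a virtual device pair such as veth hands packets from one peer's
 * start_xmit to the other's receive path; demo_get_peer() is hypothetical.
 *
 *	static netdev_tx_t demo_xmit(struct sk_buff *skb,
 *				     struct net_device *dev)
 *	{
 *		struct net_device *peer = demo_get_peer(dev);
 *
 *		dev_forward_skb(peer, skb);	(consumes skb either way)
 *		return NETDEV_TX_OK;
 *	}
 */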

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 * Support routine. Sends outgoing frames to any network
 * taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this, verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid and nothing can be done, so disable priority mappings.
 * It is expected that drivers will fix this mapping if they can
 * before calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		return -1;
	}

	return 0;
}

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}

static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	int num_tc = dev->num_tc ? : 1;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			if (!remove_xps_queue(dev_maps, cpu, j))
				break;
		}

		active |= i < 0;
	}

	return active;
}

static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu)
		active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
					       offset, count);

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = offset + (count - 1); count--; i--)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	int i, cpu, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	bool active = false;

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_cpu_and(cpu, cpu_online_mask, mask) {
		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		tci = cpu * num_tc + tc;
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		/* copy maps belonging to foreign traffic classes */
		for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* We need to explicitly update tci as previous loop
		 * could break out early if dev_maps is NULL.
		 */
		tci = cpu * num_tc + tc;

		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}

		/* copy maps belonging to foreign traffic classes */
		for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = xmap_dereference(dev_maps->cpu_map[tci]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}
	}

	kfree_rcu(dev_maps, rcu);

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		for (i = tc, tci = cpu * num_tc; i--; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
		if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
			active |= remove_xps_queue(dev_maps, tci, index);
		for (i = num_tc - tc, tci++; --i; tci++)
			active |= remove_xps_queue(dev_maps, tci, index);
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
			map = dev_maps ?
			      xmap_dereference(dev_maps->cpu_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
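
/*
 * Configuration sketch (illustrative, not from the original source): a
 * driver pinning transmit queue i to the CPU of the same index would build
 * a one-CPU mask per queue:
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(i, mask);
 *		netif_set_xps_queue(dev, mask, i);
 *		free_cpumask_var(mask);
 *	}
 */
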
void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);

int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);

int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
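
/*
 * Sketch (illustrative only): a driver exposing two traffic classes over
 * eight queues might split them as queues 0-3 for TC0 and 4-7 for TC1:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	(TC0: count 4, offset 0)
 *	netdev_set_tc_queue(dev, 1, 4, 4);	(TC1: count 4, offset 4)
 */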

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
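
/*
 * Illustrative call (an assumption, not from the original source): a driver
 * that allocated the maximum queue count up front can shrink the active set
 * at runtime, under RTNL once the device is registered:
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, nr_active);
 *	rtnl_unlock();
 */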

#ifdef CONFIG_SYSFS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return is_kdump_kernel() ?
		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(atomic_read(&skb->users) == 1)) {
		smp_rmb();
		atomic_set(&skb->users, 0);
	} else if (likely(!atomic_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
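
/*
 * Usage sketch (an assumption, not from the original source): drivers free
 * transmitted skbs from hard-irq context through the _irq/_any variants
 * rather than dev_kfree_skb(), picking the reason that matches the outcome:
 *
 *	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);	(sent successfully)
 *	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);	(dropped on error)
 */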
Denis Vlasenko56079432006-03-29 15:57:29 -08002480
2481
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002482/**
2483 * netif_device_detach - mark device as removed
2484 * @dev: network device
2485 *
2486 * Mark device as removed from system and therefore no longer available.
2487 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002488void netif_device_detach(struct net_device *dev)
2489{
2490 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2491 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002492 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002493 }
2494}
2495EXPORT_SYMBOL(netif_device_detach);
2496
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002497/**
2498 * netif_device_attach - mark device as attached
2499 * @dev: network device
2500 *
2501 * Mark device as attached from system and restart if needed.
2502 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002503void netif_device_attach(struct net_device *dev)
2504{
2505 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2506 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002507 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002508 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002509 }
2510}
2511EXPORT_SYMBOL(netif_device_attach);
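
/* Illustrative sketch (not part of this file): detach/attach usually
 * bracket a device's suspend/resume cycle; my_resume() is hypothetical.
 *
 *	static int my_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		// ... re-program the hardware ...
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 *
 * with the matching suspend callback invoking netif_device_detach()
 * before quiescing the hardware.
 */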
2512
Jiri Pirko5605c762015-05-12 14:56:12 +02002513/*
2514 * Returns a Tx hash based on the given packet descriptor and a Tx queue
2515 * count to be used as a distribution range.
2516 */
2517u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2518 unsigned int num_tx_queues)
2519{
2520 u32 hash;
2521 u16 qoffset = 0;
2522 u16 qcount = num_tx_queues;
2523
2524 if (skb_rx_queue_recorded(skb)) {
2525 hash = skb_get_rx_queue(skb);
2526 while (unlikely(hash >= num_tx_queues))
2527 hash -= num_tx_queues;
2528 return hash;
2529 }
2530
2531 if (dev->num_tc) {
2532 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
tchardingf4563a72017-02-09 17:56:07 +11002533
Jiri Pirko5605c762015-05-12 14:56:12 +02002534 qoffset = dev->tc_to_txq[tc].offset;
2535 qcount = dev->tc_to_txq[tc].count;
2536 }
2537
2538 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2539}
2540EXPORT_SYMBOL(__skb_tx_hash);
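
/* Worked example (illustrative): with num_tx_queues == 4 and no traffic
 * classes, reciprocal_scale(h, 4) computes ((u64)h * 4) >> 32, mapping the
 * 32-bit flow hash h evenly onto queues 0..3; with num_tc configured, the
 * same math picks a queue inside the class's [offset, offset + count) slice.
 */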
2541
Ben Hutchings36c92472012-01-17 07:57:56 +00002542static void skb_warn_bad_offload(const struct sk_buff *skb)
2543{
Wei Tang84d15ae2016-06-16 21:17:49 +08002544 static const netdev_features_t null_features;
Ben Hutchings36c92472012-01-17 07:57:56 +00002545 struct net_device *dev = skb->dev;
Bjørn Mork88ad4172015-11-16 19:16:40 +01002546 const char *name = "";
Ben Hutchings36c92472012-01-17 07:57:56 +00002547
Ben Greearc846ad92013-04-19 10:45:52 +00002548 if (!net_ratelimit())
2549 return;
2550
Bjørn Mork88ad4172015-11-16 19:16:40 +01002551 if (dev) {
2552 if (dev->dev.parent)
2553 name = dev_driver_string(dev->dev.parent);
2554 else
2555 name = netdev_name(dev);
2556 }
Ben Hutchings36c92472012-01-17 07:57:56 +00002557 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2558 "gso_type=%d ip_summed=%d\n",
Bjørn Mork88ad4172015-11-16 19:16:40 +01002559 name, dev ? &dev->features : &null_features,
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002560 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002561 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2562 skb_shinfo(skb)->gso_type, skb->ip_summed);
2563}
2564
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565/*
2566 * Invalidate hardware checksum when packet is to be mangled, and
2567 * complete checksum manually on outgoing path.
2568 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002569int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570{
Al Virod3bc23e2006-11-14 21:24:49 -08002571 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002572 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573
Patrick McHardy84fa7932006-08-29 16:44:56 -07002574 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002575 goto out_set_summed;
2576
2577 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002578 skb_warn_bad_offload(skb);
2579 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 }
2581
Eric Dumazetcef401d2013-01-25 20:34:37 +00002582 /* Before computing a checksum, we should make sure no frag could
2583 * be modified by an external entity: the checksum could be wrong.
2584 */
2585 if (skb_has_shared_frag(skb)) {
2586 ret = __skb_linearize(skb);
2587 if (ret)
2588 goto out;
2589 }
2590
Michał Mirosław55508d62010-12-14 15:24:08 +00002591 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002592 BUG_ON(offset >= skb_headlen(skb));
2593 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2594
2595 offset += skb->csum_offset;
2596 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2597
2598 if (skb_cloned(skb) &&
2599 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2601 if (ret)
2602 goto out;
2603 }
2604
Eric Dumazet4f2e4ad2016-10-29 11:02:36 -07002605 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
Herbert Xua430a432006-07-08 13:34:56 -07002606out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002608out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609 return ret;
2610}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002611EXPORT_SYMBOL(skb_checksum_help);
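
/* Illustrative sketch (not part of this file): a driver whose hardware
 * cannot checksum a given packet falls back to software in its
 * ndo_start_xmit(); my_hw_can_csum() and the drop label are hypothetical.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !my_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */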
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612
Vlad Yasevich53d64712014-03-27 17:26:18 -04002613__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002614{
2615 __be16 type = skb->protocol;
2616
Pravin B Shelar19acc322013-05-07 20:41:07 +00002617 /* Tunnel gso handlers can set protocol to ethernet. */
2618 if (type == htons(ETH_P_TEB)) {
2619 struct ethhdr *eth;
2620
2621 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2622 return 0;
2623
2624 eth = (struct ethhdr *)skb_mac_header(skb);
2625 type = eth->h_proto;
2626 }
2627
Toshiaki Makitad4bcef32015-01-29 20:37:07 +09002628 return __vlan_get_protocol(skb, type, depth);
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002629}
2630
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002631/**
2632 * skb_mac_gso_segment - mac layer segmentation handler.
2633 * @skb: buffer to segment
2634 * @features: features for the output path (see dev->features)
2635 */
2636struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2637 netdev_features_t features)
2638{
2639 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2640 struct packet_offload *ptype;
Vlad Yasevich53d64712014-03-27 17:26:18 -04002641 int vlan_depth = skb->mac_len;
2642 __be16 type = skb_network_protocol(skb, &vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002643
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002644 if (unlikely(!type))
2645 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002646
Vlad Yasevich53d64712014-03-27 17:26:18 -04002647 __skb_pull(skb, vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002648
2649 rcu_read_lock();
2650 list_for_each_entry_rcu(ptype, &offload_base, list) {
2651 if (ptype->type == type && ptype->callbacks.gso_segment) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002652 segs = ptype->callbacks.gso_segment(skb, features);
2653 break;
2654 }
2655 }
2656 rcu_read_unlock();
2657
2658 __skb_push(skb, skb->data - skb_mac_header(skb));
2659
2660 return segs;
2661}
2662EXPORT_SYMBOL(skb_mac_gso_segment);
2663
2664
Cong Wang12b00042013-02-05 16:36:38 +00002665/* openvswitch calls this on rx path, so we need a different check. */
2667static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2668{
2669 if (tx_path)
Eric Dumazet6e7bc472017-02-03 14:29:42 -08002670 return skb->ip_summed != CHECKSUM_PARTIAL &&
2671 skb->ip_summed != CHECKSUM_NONE;
2672
2673 return skb->ip_summed == CHECKSUM_NONE;
Cong Wang12b00042013-02-05 16:36:38 +00002674}
2675
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002676/**
Cong Wang12b00042013-02-05 16:36:38 +00002677 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002678 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002679 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002680 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002681 *
2682 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002683 *
2684 * It may return NULL if the skb requires no segmentation. This is
2685 * only possible when GSO is used for verifying header integrity.
Konstantin Khlebnikov9207f9d2016-01-08 15:21:46 +03002686 *
2687 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002688 */
Cong Wang12b00042013-02-05 16:36:38 +00002689struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2690 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002691{
Eric Dumazetb2504a52017-01-31 10:20:32 -08002692 struct sk_buff *segs;
2693
Cong Wang12b00042013-02-05 16:36:38 +00002694 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002695 int err;
2696
Eric Dumazetb2504a52017-01-31 10:20:32 -08002697 /* We're going to init ->check field in TCP or UDP header */
françois romieua40e0a62014-07-15 23:55:35 +02002698 err = skb_cow_head(skb, 0);
2699 if (err < 0)
Herbert Xua430a432006-07-08 13:34:56 -07002700 return ERR_PTR(err);
2701 }
2702
Alexander Duyck802ab552016-04-10 21:45:03 -04002703 /* Only report GSO partial support if it will enable us to
2704 * support segmentation on this frame without needing additional
2705 * work.
2706 */
2707 if (features & NETIF_F_GSO_PARTIAL) {
2708 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
2709 struct net_device *dev = skb->dev;
2710
2711 partial_features |= dev->features & dev->gso_partial_features;
2712 if (!skb_gso_ok(skb, features | partial_features))
2713 features &= ~NETIF_F_GSO_PARTIAL;
2714 }
2715
Konstantin Khlebnikov9207f9d2016-01-08 15:21:46 +03002716 BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2717 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2718
Pravin B Shelar68c33162013-02-14 14:02:41 +00002719 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Eric Dumazet3347c962013-10-19 11:42:56 -07002720 SKB_GSO_CB(skb)->encap_level = 0;
2721
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002722 skb_reset_mac_header(skb);
2723 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002724
Eric Dumazetb2504a52017-01-31 10:20:32 -08002725 segs = skb_mac_gso_segment(skb, features);
2726
2727 if (unlikely(skb_needs_check(skb, tx_path)))
2728 skb_warn_bad_offload(skb);
2729
2730 return segs;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002731}
Cong Wang12b00042013-02-05 16:36:38 +00002732EXPORT_SYMBOL(__skb_gso_segment);
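
/* Illustrative sketch (not part of this file): a caller walks the segment
 * list returned by skb_gso_segment(), the tx_path == true wrapper around
 * this function; xmit_one_seg() is hypothetical.
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);
 *		while (segs) {
 *			struct sk_buff *next = segs->next;
 *
 *			segs->next = NULL;
 *			xmit_one_seg(segs);
 *			segs = next;
 *		}
 *	}
 */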
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002733
Herbert Xufb286bb2005-11-10 13:01:24 -08002734/* Take action when hardware reception checksum errors are detected. */
2735#ifdef CONFIG_BUG
2736void netdev_rx_csum_fault(struct net_device *dev)
2737{
2738 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002739 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002740 dump_stack();
2741 }
2742}
2743EXPORT_SYMBOL(netdev_rx_csum_fault);
2744#endif
2745
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746/* Actually, we should eliminate this check as soon as we know that:
2747 * 1. IOMMU is present and allows mapping all the memory.
2748 * 2. No high memory really exists on this machine.
2749 */
2750
Florian Westphalc1e756b2014-05-05 15:00:44 +02002751static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002753#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754 int i;
tchardingf4563a72017-02-09 17:56:07 +11002755
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002756 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002757 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2758 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tchardingf4563a72017-02-09 17:56:07 +11002759
Ian Campbellea2ab692011-08-22 23:44:58 +00002760 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002761 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002762 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002763 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002765 if (PCI_DMA_BUS_IS_PHYS) {
2766 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767
Eric Dumazet9092c652010-04-02 13:34:49 -07002768 if (!pdev)
2769 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002770 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002771 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2772 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
tchardingf4563a72017-02-09 17:56:07 +11002773
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002774 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2775 return 1;
2776 }
2777 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002778#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 return 0;
2780}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781
Simon Horman3b392dd2014-06-04 08:53:17 +09002782/* For an MPLS offload request, verify we are testing hardware MPLS features
2783 * instead of the standard features for the netdev.
2784 */
Pravin B Shelard0edc7b2014-12-23 16:20:11 -08002785#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
Simon Horman3b392dd2014-06-04 08:53:17 +09002786static netdev_features_t net_mpls_features(struct sk_buff *skb,
2787 netdev_features_t features,
2788 __be16 type)
2789{
Simon Horman25cd9ba2014-10-06 05:05:13 -07002790 if (eth_p_mpls(type))
Simon Horman3b392dd2014-06-04 08:53:17 +09002791 features &= skb->dev->mpls_features;
2792
2793 return features;
2794}
2795#else
2796static netdev_features_t net_mpls_features(struct sk_buff *skb,
2797 netdev_features_t features,
2798 __be16 type)
2799{
2800 return features;
2801}
2802#endif
2803
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002804static netdev_features_t harmonize_features(struct sk_buff *skb,
Florian Westphalc1e756b2014-05-05 15:00:44 +02002805 netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002806{
Vlad Yasevich53d64712014-03-27 17:26:18 -04002807 int tmp;
Simon Horman3b392dd2014-06-04 08:53:17 +09002808 __be16 type;
2809
2810 type = skb_network_protocol(skb, &tmp);
2811 features = net_mpls_features(skb, features, type);
Vlad Yasevich53d64712014-03-27 17:26:18 -04002812
Ed Cashinc0d680e2012-09-19 15:49:00 +00002813 if (skb->ip_summed != CHECKSUM_NONE &&
Simon Horman3b392dd2014-06-04 08:53:17 +09002814 !can_checksum_protocol(features, type)) {
Alexander Duyck996e8022016-05-02 09:25:10 -07002815 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Jesse Grossf01a5232011-01-09 06:23:31 +00002816 }
Eric Dumazet7be2c822017-01-18 12:12:17 -08002817 if (illegal_highdma(skb->dev, skb))
2818 features &= ~NETIF_F_SG;
Jesse Grossf01a5232011-01-09 06:23:31 +00002819
2820 return features;
2821}
2822
Toshiaki Makitae38f3022015-03-27 14:31:13 +09002823netdev_features_t passthru_features_check(struct sk_buff *skb,
2824 struct net_device *dev,
2825 netdev_features_t features)
2826{
2827 return features;
2828}
2829EXPORT_SYMBOL(passthru_features_check);
2830
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002831static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2832 struct net_device *dev,
2833 netdev_features_t features)
2834{
2835 return vlan_features_check(skb, features);
2836}
2837
Alexander Duyckcbc53e02016-04-10 21:44:51 -04002838static netdev_features_t gso_features_check(const struct sk_buff *skb,
2839 struct net_device *dev,
2840 netdev_features_t features)
2841{
2842 u16 gso_segs = skb_shinfo(skb)->gso_segs;
2843
2844 if (gso_segs > dev->gso_max_segs)
2845 return features & ~NETIF_F_GSO_MASK;
2846
Alexander Duyck802ab552016-04-10 21:45:03 -04002847 /* Support for GSO partial features requires software
2848 * intervention before we can actually process the packets,
2849 * so we need to strip support for any partial features now;
2850 * we can pull them back in after we have partially
2851 * segmented the frame.
2852 */
2853 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
2854 features &= ~dev->gso_partial_features;
2855
2856 /* Make sure to clear the IPv4 ID mangling feature if the
2857 * IPv4 header has the potential to be fragmented.
Alexander Duyckcbc53e02016-04-10 21:44:51 -04002858 */
2859 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
2860 struct iphdr *iph = skb->encapsulation ?
2861 inner_ip_hdr(skb) : ip_hdr(skb);
2862
2863 if (!(iph->frag_off & htons(IP_DF)))
2864 features &= ~NETIF_F_TSO_MANGLEID;
2865 }
2866
2867 return features;
2868}
2869
Florian Westphalc1e756b2014-05-05 15:00:44 +02002870netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002871{
Jesse Gross5f352272014-12-23 22:37:26 -08002872 struct net_device *dev = skb->dev;
Eric Dumazetfcbeb972014-10-05 10:11:27 -07002873 netdev_features_t features = dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002874
Alexander Duyckcbc53e02016-04-10 21:44:51 -04002875 if (skb_is_gso(skb))
2876 features = gso_features_check(skb, dev, features);
Ben Hutchings30b678d2012-07-30 15:57:00 +00002877
Jesse Gross5f352272014-12-23 22:37:26 -08002878 /* For an encapsulation offload request, verify we are testing
2879 * hardware encapsulation features instead of the standard
2880 * features for the netdev.
2881 */
2882 if (skb->encapsulation)
2883 features &= dev->hw_enc_features;
2884
Toshiaki Makitaf5a7fb82015-03-27 14:31:11 +09002885 if (skb_vlan_tagged(skb))
2886 features = netdev_intersect_features(features,
2887 dev->vlan_features |
2888 NETIF_F_HW_VLAN_CTAG_TX |
2889 NETIF_F_HW_VLAN_STAG_TX);
Jesse Gross58e998c2010-10-29 12:14:55 +00002890
Jesse Gross5f352272014-12-23 22:37:26 -08002891 if (dev->netdev_ops->ndo_features_check)
2892 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2893 features);
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002894 else
2895 features &= dflt_features_check(skb, dev, features);
Jesse Gross5f352272014-12-23 22:37:26 -08002896
Florian Westphalc1e756b2014-05-05 15:00:44 +02002897 return harmonize_features(skb, features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002898}
Florian Westphalc1e756b2014-05-05 15:00:44 +02002899EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002900
David S. Miller2ea25512014-08-29 21:10:01 -07002901static int xmit_one(struct sk_buff *skb, struct net_device *dev,
David S. Miller95f6b3d2014-08-29 21:57:30 -07002902 struct netdev_queue *txq, bool more)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002903{
David S. Miller2ea25512014-08-29 21:10:01 -07002904 unsigned int len;
2905 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08002906
Salam Noureddine7866a622015-01-27 11:35:48 -08002907 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
David S. Miller2ea25512014-08-29 21:10:01 -07002908 dev_queue_xmit_nit(skb, dev);
Jesse Grossfc741212011-01-09 06:23:32 +00002909
David S. Miller2ea25512014-08-29 21:10:01 -07002910 len = skb->len;
2911 trace_net_dev_start_xmit(skb, dev);
David S. Miller95f6b3d2014-08-29 21:57:30 -07002912 rc = netdev_start_xmit(skb, dev, txq, more);
David S. Miller2ea25512014-08-29 21:10:01 -07002913 trace_net_dev_xmit(skb, rc, dev, len);
Eric Dumazetadf30902009-06-02 05:19:30 +00002914
Patrick McHardy572a9d72009-11-10 06:14:14 +00002915 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002916}
David S. Miller2ea25512014-08-29 21:10:01 -07002917
David S. Miller8dcda222014-09-01 15:06:40 -07002918struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2919 struct netdev_queue *txq, int *ret)
David S. Miller7f2e8702014-08-29 21:19:14 -07002920{
2921 struct sk_buff *skb = first;
2922 int rc = NETDEV_TX_OK;
2923
2924 while (skb) {
2925 struct sk_buff *next = skb->next;
2926
2927 skb->next = NULL;
David S. Miller95f6b3d2014-08-29 21:57:30 -07002928 rc = xmit_one(skb, dev, txq, next != NULL);
David S. Miller7f2e8702014-08-29 21:19:14 -07002929 if (unlikely(!dev_xmit_complete(rc))) {
2930 skb->next = next;
2931 goto out;
2932 }
2933
2934 skb = next;
2935 if (netif_xmit_stopped(txq) && skb) {
2936 rc = NETDEV_TX_BUSY;
2937 break;
2938 }
2939 }
2940
2941out:
2942 *ret = rc;
2943 return skb;
2944}
2945
Eric Dumazet1ff0dc92014-10-06 11:26:27 -07002946static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2947 netdev_features_t features)
David S. Millereae3f882014-08-30 15:17:13 -07002948{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002949 if (skb_vlan_tag_present(skb) &&
Jiri Pirko59682502014-11-19 14:04:59 +01002950 !vlan_hw_offload_capable(features, skb->vlan_proto))
2951 skb = __vlan_hwaccel_push_inside(skb);
David S. Millereae3f882014-08-30 15:17:13 -07002952 return skb;
2953}
2954
Eric Dumazet55a93b32014-10-03 15:31:07 -07002955static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
David S. Millereae3f882014-08-30 15:17:13 -07002956{
2957 netdev_features_t features;
2958
David S. Millereae3f882014-08-30 15:17:13 -07002959 features = netif_skb_features(skb);
2960 skb = validate_xmit_vlan(skb, features);
2961 if (unlikely(!skb))
2962 goto out_null;
2963
Johannes Berg8b86a612015-04-17 15:45:04 +02002964 if (netif_needs_gso(skb, features)) {
David S. Millerce937182014-08-30 19:22:20 -07002965 struct sk_buff *segs;
2966
2967 segs = skb_gso_segment(skb, features);
Jason Wangcecda692014-09-19 16:04:38 +08002968 if (IS_ERR(segs)) {
Jason Wangaf6dabc2014-12-19 11:09:13 +08002969 goto out_kfree_skb;
Jason Wangcecda692014-09-19 16:04:38 +08002970 } else if (segs) {
2971 consume_skb(skb);
2972 skb = segs;
2973 }
David S. Millereae3f882014-08-30 15:17:13 -07002974 } else {
2975 if (skb_needs_linearize(skb, features) &&
2976 __skb_linearize(skb))
2977 goto out_kfree_skb;
2978
Steffen Klassertf6e27112017-04-14 10:07:28 +02002979 if (validate_xmit_xfrm(skb, features))
2980 goto out_kfree_skb;
2981
David S. Millereae3f882014-08-30 15:17:13 -07002982 /* If packet is not checksummed and device does not
2983 * support checksumming for this protocol, complete
2984 * checksumming here.
2985 */
2986 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2987 if (skb->encapsulation)
2988 skb_set_inner_transport_header(skb,
2989 skb_checksum_start_offset(skb));
2990 else
2991 skb_set_transport_header(skb,
2992 skb_checksum_start_offset(skb));
Tom Herberta1882222015-12-14 11:19:43 -08002993 if (!(features & NETIF_F_CSUM_MASK) &&
David S. Millereae3f882014-08-30 15:17:13 -07002994 skb_checksum_help(skb))
2995 goto out_kfree_skb;
2996 }
2997 }
2998
2999 return skb;
3000
3001out_kfree_skb:
3002 kfree_skb(skb);
3003out_null:
Eric Dumazetd21fd632016-04-12 21:50:07 -07003004 atomic_long_inc(&dev->tx_dropped);
David S. Millereae3f882014-08-30 15:17:13 -07003005 return NULL;
3006}
3007
Eric Dumazet55a93b32014-10-03 15:31:07 -07003008struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
3009{
3010 struct sk_buff *next, *head = NULL, *tail;
3011
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07003012 for (; skb != NULL; skb = next) {
Eric Dumazet55a93b32014-10-03 15:31:07 -07003013 next = skb->next;
3014 skb->next = NULL;
Eric Dumazet55a93b32014-10-03 15:31:07 -07003015
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07003016 /* in case skb won't be segmented, point to itself */
3017 skb->prev = skb;
3018
3019 skb = validate_xmit_skb(skb, dev);
3020 if (!skb)
3021 continue;
3022
3023 if (!head)
3024 head = skb;
3025 else
3026 tail->next = skb;
3027 /* If skb was segmented, skb->prev points to
3028 * the last segment. If not, it still points to skb.
3029 */
3030 tail = skb->prev;
Eric Dumazet55a93b32014-10-03 15:31:07 -07003031 }
3032 return head;
3033}
Willem de Bruijn104ba782016-10-26 11:23:07 -04003034EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003035
Eric Dumazet1def9232013-01-10 12:36:42 +00003036static void qdisc_pkt_len_init(struct sk_buff *skb)
3037{
3038 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3039
3040 qdisc_skb_cb(skb)->pkt_len = skb->len;
3041
3042 /* To get a more precise estimate of the bytes sent on the wire,
3043 * we add the header size of every segment to pkt_len.
3044 */
3045 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08003046 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00003047 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00003048
Eric Dumazet757b8b12013-01-15 21:14:21 -08003049 /* mac layer + network layer */
3050 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3051
3052 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00003053 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3054 hdr_len += tcp_hdrlen(skb);
3055 else
3056 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00003057
3058 if (shinfo->gso_type & SKB_GSO_DODGY)
3059 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3060 shinfo->gso_size);
3061
3062 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00003063 }
3064}
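
/* Worked example (illustrative): a TSO skb with skb->len == 4410,
 * hdr_len == 66 (Ethernet + IP + TCP with timestamps) and gso_size == 1448
 * carries gso_segs == 3, so pkt_len becomes 4410 + 2 * 66 = 4542, the
 * bytes that actually hit the wire once headers are replicated per segment.
 */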
3065
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003066static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3067 struct net_device *dev,
3068 struct netdev_queue *txq)
3069{
3070 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazet520ac302016-06-21 23:16:49 -07003071 struct sk_buff *to_free = NULL;
Eric Dumazeta2da5702011-01-20 03:48:19 +00003072 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003073 int rc;
3074
Eric Dumazeta2da5702011-01-20 03:48:19 +00003075 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003076 /*
3077 * Heuristic to force contended enqueues to serialize on a
3078 * separate lock before trying to get the qdisc main lock.
Eric Dumazetf9eb8ae2016-06-06 09:37:15 -07003079 * This permits the qdisc->running owner to get the lock more
Ying Xue9bf2b8c2014-06-26 15:56:31 +08003080 * often and dequeue packets faster.
Eric Dumazet79640a42010-06-02 05:09:29 -07003081 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00003082 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003083 if (unlikely(contended))
3084 spin_lock(&q->busylock);
3085
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003086 spin_lock(root_lock);
3087 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
Eric Dumazet520ac302016-06-21 23:16:49 -07003088 __qdisc_drop(skb, &to_free);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003089 rc = NET_XMIT_DROP;
3090 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07003091 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003092 /*
3093 * This is a work-conserving queue; there are no old skbs
3094 * waiting to be sent out; and the qdisc is not running -
3095 * xmit the skb directly.
3096 */
Eric Dumazetbfe0d022011-01-09 08:30:54 +00003097
Eric Dumazetbfe0d022011-01-09 08:30:54 +00003098 qdisc_bstats_update(q, skb);
3099
Eric Dumazet55a93b32014-10-03 15:31:07 -07003100 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
Eric Dumazet79640a42010-06-02 05:09:29 -07003101 if (unlikely(contended)) {
3102 spin_unlock(&q->busylock);
3103 contended = false;
3104 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003105 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003106 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07003107 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003108
3109 rc = NET_XMIT_SUCCESS;
3110 } else {
Eric Dumazet520ac302016-06-21 23:16:49 -07003111 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07003112 if (qdisc_run_begin(q)) {
3113 if (unlikely(contended)) {
3114 spin_unlock(&q->busylock);
3115 contended = false;
3116 }
3117 __qdisc_run(q);
3118 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003119 }
3120 spin_unlock(root_lock);
Eric Dumazet520ac302016-06-21 23:16:49 -07003121 if (unlikely(to_free))
3122 kfree_skb_list(to_free);
Eric Dumazet79640a42010-06-02 05:09:29 -07003123 if (unlikely(contended))
3124 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003125 return rc;
3126}
3127
Daniel Borkmann86f85152013-12-29 17:27:11 +01003128#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Neil Horman5bc14212011-11-22 05:10:51 +00003129static void skb_update_prio(struct sk_buff *skb)
3130{
Igor Maravic6977a792011-11-25 07:44:54 +00003131 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00003132
Eric Dumazet91c68ce2012-07-08 21:45:10 +00003133 if (!skb->priority && skb->sk && map) {
Tejun Heo2a56a1f2015-12-07 17:38:52 -05003134 unsigned int prioidx =
3135 sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
Eric Dumazet91c68ce2012-07-08 21:45:10 +00003136
3137 if (prioidx < map->priomap_len)
3138 skb->priority = map->priomap[prioidx];
3139 }
Neil Horman5bc14212011-11-22 05:10:51 +00003140}
3141#else
3142#define skb_update_prio(skb)
3143#endif
3144
hannes@stressinduktion.orgf60e5992015-04-01 17:07:44 +02003145DEFINE_PER_CPU(int, xmit_recursion);
3146EXPORT_SYMBOL(xmit_recursion);
3147
Dave Jonesd29f7492008-07-22 14:09:06 -07003148/**
Michel Machado95603e22012-06-12 10:16:35 +00003149 * dev_loopback_xmit - loop back @skb
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05003150 * @net: network namespace this loopback is happening in
3151 * @sk: socket; present only so this function can be used as a netfilter okfn
Michel Machado95603e22012-06-12 10:16:35 +00003152 * @skb: buffer to transmit
3153 */
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05003154int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
Michel Machado95603e22012-06-12 10:16:35 +00003155{
3156 skb_reset_mac_header(skb);
3157 __skb_pull(skb, skb_network_offset(skb));
3158 skb->pkt_type = PACKET_LOOPBACK;
3159 skb->ip_summed = CHECKSUM_UNNECESSARY;
3160 WARN_ON(!skb_dst(skb));
3161 skb_dst_force(skb);
3162 netif_rx_ni(skb);
3163 return 0;
3164}
3165EXPORT_SYMBOL(dev_loopback_xmit);
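
/* Illustrative sketch (not part of this file): dev_loopback_xmit() is
 * written to be used as an okfn for NF_HOOK(); IPv4 multicast output
 * loops a copy back to local listeners roughly like this:
 *
 *	NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 *		net, sk, newskb, NULL, newskb->dev,
 *		dev_loopback_xmit);
 */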
3166
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003167#ifdef CONFIG_NET_EGRESS
3168static struct sk_buff *
3169sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3170{
3171 struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
3172 struct tcf_result cl_res;
3173
3174 if (!cl)
3175 return skb;
3176
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05003177 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003178 qdisc_bstats_cpu_update(cl->q, skb);
3179
3180 switch (tc_classify(skb, cl, &cl_res, false)) {
3181 case TC_ACT_OK:
3182 case TC_ACT_RECLASSIFY:
3183 skb->tc_index = TC_H_MIN(cl_res.classid);
3184 break;
3185 case TC_ACT_SHOT:
3186 qdisc_qstats_cpu_drop(cl->q);
3187 *ret = NET_XMIT_DROP;
Daniel Borkmann7e2c3ae2016-05-15 23:28:29 +02003188 kfree_skb(skb);
3189 return NULL;
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003190 case TC_ACT_STOLEN:
3191 case TC_ACT_QUEUED:
3192 *ret = NET_XMIT_SUCCESS;
Daniel Borkmann7e2c3ae2016-05-15 23:28:29 +02003193 consume_skb(skb);
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003194 return NULL;
3195 case TC_ACT_REDIRECT:
3196 /* No need to push/pop skb's mac_header here on egress! */
3197 skb_do_redirect(skb);
3198 *ret = NET_XMIT_SUCCESS;
3199 return NULL;
3200 default:
3201 break;
3202 }
3203
3204 return skb;
3205}
3206#endif /* CONFIG_NET_EGRESS */
3207
Jiri Pirko638b2a62015-05-12 14:56:13 +02003208static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3209{
3210#ifdef CONFIG_XPS
3211 struct xps_dev_maps *dev_maps;
3212 struct xps_map *map;
3213 int queue_index = -1;
3214
3215 rcu_read_lock();
3216 dev_maps = rcu_dereference(dev->xps_maps);
3217 if (dev_maps) {
Alexander Duyck184c4492016-10-28 11:50:13 -04003218 unsigned int tci = skb->sender_cpu - 1;
3219
3220 if (dev->num_tc) {
3221 tci *= dev->num_tc;
3222 tci += netdev_get_prio_tc_map(dev, skb->priority);
3223 }
3224
3225 map = rcu_dereference(dev_maps->cpu_map[tci]);
Jiri Pirko638b2a62015-05-12 14:56:13 +02003226 if (map) {
3227 if (map->len == 1)
3228 queue_index = map->queues[0];
3229 else
3230 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
3231 map->len)];
3232 if (unlikely(queue_index >= dev->real_num_tx_queues))
3233 queue_index = -1;
3234 }
3235 }
3236 rcu_read_unlock();
3237
3238 return queue_index;
3239#else
3240 return -1;
3241#endif
3242}
3243
3244static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3245{
3246 struct sock *sk = skb->sk;
3247 int queue_index = sk_tx_queue_get(sk);
3248
3249 if (queue_index < 0 || skb->ooo_okay ||
3250 queue_index >= dev->real_num_tx_queues) {
3251 int new_index = get_xps_queue(dev, skb);
tchardingf4563a72017-02-09 17:56:07 +11003252
Jiri Pirko638b2a62015-05-12 14:56:13 +02003253 if (new_index < 0)
3254 new_index = skb_tx_hash(dev, skb);
3255
3256 if (queue_index != new_index && sk &&
Eric Dumazet004a5d02015-10-04 21:08:10 -07003257 sk_fullsock(sk) &&
Jiri Pirko638b2a62015-05-12 14:56:13 +02003258 rcu_access_pointer(sk->sk_dst_cache))
3259 sk_tx_queue_set(sk, new_index);
3260
3261 queue_index = new_index;
3262 }
3263
3264 return queue_index;
3265}
3266
3267struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3268 struct sk_buff *skb,
3269 void *accel_priv)
3270{
3271 int queue_index = 0;
3272
3273#ifdef CONFIG_XPS
Eric Dumazet52bd2d62015-11-18 06:30:50 -08003274 u32 sender_cpu = skb->sender_cpu - 1;
3275
3276 if (sender_cpu >= (u32)NR_CPUS)
Jiri Pirko638b2a62015-05-12 14:56:13 +02003277 skb->sender_cpu = raw_smp_processor_id() + 1;
3278#endif
3279
3280 if (dev->real_num_tx_queues != 1) {
3281 const struct net_device_ops *ops = dev->netdev_ops;
tchardingf4563a72017-02-09 17:56:07 +11003282
Jiri Pirko638b2a62015-05-12 14:56:13 +02003283 if (ops->ndo_select_queue)
3284 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3285 __netdev_pick_tx);
3286 else
3287 queue_index = __netdev_pick_tx(dev, skb);
3288
3289 if (!accel_priv)
3290 queue_index = netdev_cap_txqueue(dev, queue_index);
3291 }
3292
3293 skb_set_queue_mapping(skb, queue_index);
3294 return netdev_get_tx_queue(dev, queue_index);
3295}
3296
Michel Machado95603e22012-06-12 10:16:35 +00003297/**
Jason Wang9d08dd32014-01-20 11:25:13 +08003298 * __dev_queue_xmit - transmit a buffer
Dave Jonesd29f7492008-07-22 14:09:06 -07003299 * @skb: buffer to transmit
Jason Wang9d08dd32014-01-20 11:25:13 +08003300 * @accel_priv: private data used for L2 forwarding offload
Dave Jonesd29f7492008-07-22 14:09:06 -07003301 *
3302 * Queue a buffer for transmission to a network device. The caller must
3303 * have set the device and priority and built the buffer before calling
3304 * this function. The function can be called from an interrupt.
3305 *
3306 * A negative errno code is returned on a failure. A success does not
3307 * guarantee the frame will be transmitted as it may be dropped due
3308 * to congestion or traffic shaping.
3309 *
3310 * -----------------------------------------------------------------------------------
3311 * I notice this method can also return errors from the queue disciplines,
3312 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3313 * be positive.
3314 *
3315 * Regardless of the return value, the skb is consumed, so it is currently
3316 * difficult to retry a send to this method. (You can bump the ref count
3317 * before sending to hold a reference for retry if you are careful.)
3318 *
3319 * When calling this method, interrupts MUST be enabled. This is because
3320 * the BH enable code must have IRQs enabled so that it will not deadlock.
3321 * --BLG
3322 */
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05303323static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003324{
3325 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07003326 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003327 struct Qdisc *q;
3328 int rc = -ENOMEM;
3329
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00003330 skb_reset_mac_header(skb);
3331
Willem de Bruijne7fd2882014-08-04 22:11:48 -04003332 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3333 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3334
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003335 /* Disable soft irqs for various locks below. Also
3336 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003338 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339
Neil Horman5bc14212011-11-22 05:10:51 +00003340 skb_update_prio(skb);
3341
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003342 qdisc_pkt_len_init(skb);
3343#ifdef CONFIG_NET_CLS_ACT
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05003344 skb->tc_at_ingress = 0;
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003345# ifdef CONFIG_NET_EGRESS
3346 if (static_key_false(&egress_needed)) {
3347 skb = sch_handle_egress(skb, &rc, dev);
3348 if (!skb)
3349 goto out;
3350 }
3351# endif
3352#endif
Eric Dumazet02875872014-10-05 18:38:35 -07003353 /* If device/qdisc don't need skb->dst, release it right now while
3354 * it's hot in this CPU's cache.
3355 */
3356 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3357 skb_dst_drop(skb);
3358 else
3359 skb_dst_force(skb);
3360
Jason Wangf663dd92014-01-10 16:18:26 +08003361 txq = netdev_pick_tx(dev, skb, accel_priv);
Paul E. McKenneya898def2010-02-22 17:04:49 -08003362 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07003363
Koki Sanagicf66ba52010-08-23 18:45:02 +09003364 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003366 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07003367 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 }
3369
3370 /* The device has no queue. Common case for software devices:
tchardingeb13da12017-02-09 17:56:06 +11003371 * loopback, all sorts of tunnels...
Linus Torvalds1da177e2005-04-16 15:20:36 -07003372 *
tchardingeb13da12017-02-09 17:56:06 +11003373 * Really, it is unlikely that netif_tx_lock protection is necessary
3374 * here. (f.e. loopback and IP tunnels are clean, ignoring statistics
3375 * counters.)
3376 * However, it is possible that they rely on the protection
3377 * made by us here.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378 *
tchardingeb13da12017-02-09 17:56:06 +11003379 * Check this and shoot the lock. It is not prone to deadlocks.
3380 * Either shoot the noqueue qdisc, it is even simpler 8)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381 */
3382 if (dev->flags & IFF_UP) {
3383 int cpu = smp_processor_id(); /* ok because BHs are off */
3384
David S. Millerc773e842008-07-08 23:13:53 -07003385 if (txq->xmit_lock_owner != cpu) {
Daniel Borkmanna70b5062016-06-10 21:19:06 +02003386 if (unlikely(__this_cpu_read(xmit_recursion) >
3387 XMIT_RECURSION_LIMIT))
Eric Dumazet745e20f2010-09-29 13:23:09 -07003388 goto recursion_alert;
3389
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003390 skb = validate_xmit_skb(skb, dev);
3391 if (!skb)
Eric Dumazetd21fd632016-04-12 21:50:07 -07003392 goto out;
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003393
David S. Millerc773e842008-07-08 23:13:53 -07003394 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395
Tom Herbert734664982011-11-28 16:32:44 +00003396 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07003397 __this_cpu_inc(xmit_recursion);
David S. Millerce937182014-08-30 19:22:20 -07003398 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
Eric Dumazet745e20f2010-09-29 13:23:09 -07003399 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00003400 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07003401 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402 goto out;
3403 }
3404 }
David S. Millerc773e842008-07-08 23:13:53 -07003405 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00003406 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3407 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408 } else {
3409 /* Recursion is detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07003410 * unfortunately.
3411 */
3412recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00003413 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3414 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 }
3416 }
3417
3418 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07003419 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420
Eric Dumazet015f0682014-03-27 08:45:56 -07003421 atomic_long_inc(&dev->tx_dropped);
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003422 kfree_skb_list(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423 return rc;
3424out:
Herbert Xud4828d82006-06-22 02:28:18 -07003425 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426 return rc;
3427}
Jason Wangf663dd92014-01-10 16:18:26 +08003428
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05003429int dev_queue_xmit(struct sk_buff *skb)
Jason Wangf663dd92014-01-10 16:18:26 +08003430{
3431 return __dev_queue_xmit(skb, NULL);
3432}
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05003433EXPORT_SYMBOL(dev_queue_xmit);
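
/* Illustrative sketch (not part of this file): a typical sender sets
 * skb->dev (and optionally skb->priority) before handing the buffer off;
 * the skb is consumed whatever the outcome, so it must not be touched
 * afterwards.
 *
 *	skb->dev = dev;
 *	skb->priority = TC_PRIO_CONTROL;
 *	if (dev_queue_xmit(skb))
 *		my_stats.tx_dropped++;	// hypothetical counter
 */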
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434
Jason Wangf663dd92014-01-10 16:18:26 +08003435int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3436{
3437 return __dev_queue_xmit(skb, accel_priv);
3438}
3439EXPORT_SYMBOL(dev_queue_xmit_accel);
3440
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441
tchardingeb13da12017-02-09 17:56:06 +11003442/*************************************************************************
3443 * Receiver routines
3444 *************************************************************************/
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003446int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00003447EXPORT_SYMBOL(netdev_max_backlog);
3448
Eric Dumazet3b098e22010-05-15 23:57:10 -07003449int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003450int netdev_budget __read_mostly = 300;
Matthew Whitehead7acf8a12017-04-19 12:37:10 -04003451unsigned int __read_mostly netdev_budget_usecs = 2000;
Matthias Tafelmeier3d48b532016-12-29 21:37:21 +01003452int weight_p __read_mostly = 64; /* old backlog weight */
3453int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
3454int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
3455int dev_rx_weight __read_mostly = 64;
3456int dev_tx_weight __read_mostly = 64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003458/* Called with irq disabled */
3459static inline void ____napi_schedule(struct softnet_data *sd,
3460 struct napi_struct *napi)
3461{
3462 list_add_tail(&napi->poll_list, &sd->poll_list);
3463 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3464}
3465
Eric Dumazetdf334542010-03-24 19:13:54 +00003466#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07003467
3468/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003469struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07003470EXPORT_SYMBOL(rps_sock_flow_table);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003471u32 rps_cpu_mask __read_mostly;
3472EXPORT_SYMBOL(rps_cpu_mask);
Tom Herbertfec5e652010-04-16 16:01:27 -07003473
Ingo Molnarc5905af2012-02-24 08:31:31 +01003474struct static_key rps_needed __read_mostly;
Jason Wang3df97ba2016-04-25 23:13:42 -04003475EXPORT_SYMBOL(rps_needed);
Eric Dumazet13bfff22016-12-07 08:29:10 -08003476struct static_key rfs_needed __read_mostly;
3477EXPORT_SYMBOL(rfs_needed);
Eric Dumazetadc93002011-11-17 03:13:26 +00003478
Ben Hutchingsc4454772011-01-19 11:03:53 +00003479static struct rps_dev_flow *
3480set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3481 struct rps_dev_flow *rflow, u16 next_cpu)
3482{
Eric Dumazeta31196b2015-04-25 09:35:24 -07003483 if (next_cpu < nr_cpu_ids) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00003484#ifdef CONFIG_RFS_ACCEL
3485 struct netdev_rx_queue *rxqueue;
3486 struct rps_dev_flow_table *flow_table;
3487 struct rps_dev_flow *old_rflow;
3488 u32 flow_id;
3489 u16 rxq_index;
3490 int rc;
3491
3492 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00003493 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3494 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00003495 goto out;
3496 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3497 if (rxq_index == skb_get_rx_queue(skb))
3498 goto out;
3499
3500 rxqueue = dev->_rx + rxq_index;
3501 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3502 if (!flow_table)
3503 goto out;
Tom Herbert61b905d2014-03-24 15:34:47 -07003504 flow_id = skb_get_hash(skb) & flow_table->mask;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003505 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3506 rxq_index, flow_id);
3507 if (rc < 0)
3508 goto out;
3509 old_rflow = rflow;
3510 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00003511 rflow->filter = rc;
3512 if (old_rflow->filter == rflow->filter)
3513 old_rflow->filter = RPS_NO_FILTER;
3514 out:
3515#endif
3516 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00003517 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003518 }
3519
Ben Hutchings09994d12011-10-03 04:42:46 +00003520 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003521 return rflow;
3522}
3523
Tom Herbert0a9627f2010-03-16 08:03:29 +00003524/*
3525 * get_rps_cpu is called from netif_receive_skb and returns the target
3526 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003527 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00003528 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003529static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3530 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003531{
Eric Dumazet567e4b72015-02-06 12:59:01 -08003532 const struct rps_sock_flow_table *sock_flow_table;
3533 struct netdev_rx_queue *rxqueue = dev->_rx;
Tom Herbertfec5e652010-04-16 16:01:27 -07003534 struct rps_dev_flow_table *flow_table;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003535 struct rps_map *map;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003536 int cpu = -1;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003537 u32 tcpu;
Tom Herbert61b905d2014-03-24 15:34:47 -07003538 u32 hash;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003539
Tom Herbert0a9627f2010-03-16 08:03:29 +00003540 if (skb_rx_queue_recorded(skb)) {
3541 u16 index = skb_get_rx_queue(skb);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003542
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003543 if (unlikely(index >= dev->real_num_rx_queues)) {
3544 WARN_ONCE(dev->real_num_rx_queues > 1,
3545 "%s received packet on queue %u, but number "
3546 "of RX queues is %u\n",
3547 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003548 goto done;
3549 }
Eric Dumazet567e4b72015-02-06 12:59:01 -08003550 rxqueue += index;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003551 }
3552
Eric Dumazet567e4b72015-02-06 12:59:01 -08003553 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3554
3555 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3556 map = rcu_dereference(rxqueue->rps_map);
3557 if (!flow_table && !map)
3558 goto done;
3559
Changli Gao2d47b452010-08-17 19:00:56 +00003560 skb_reset_network_header(skb);
Tom Herbert61b905d2014-03-24 15:34:47 -07003561 hash = skb_get_hash(skb);
3562 if (!hash)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003563 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003564
Tom Herbertfec5e652010-04-16 16:01:27 -07003565 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3566 if (flow_table && sock_flow_table) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003567 struct rps_dev_flow *rflow;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003568 u32 next_cpu;
3569 u32 ident;
Tom Herbertfec5e652010-04-16 16:01:27 -07003570
Eric Dumazet567e4b72015-02-06 12:59:01 -08003571 /* First, check the global flow table for a match */
3572 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3573 if ((ident ^ hash) & ~rps_cpu_mask)
3574 goto try_rps;
3575
3576 next_cpu = ident & rps_cpu_mask;
3577
3578 /* OK, now we know there is a match,
3579 * we can look at the local (per receive queue) flow table
3580 */
Tom Herbert61b905d2014-03-24 15:34:47 -07003581 rflow = &flow_table->flows[hash & flow_table->mask];
Tom Herbertfec5e652010-04-16 16:01:27 -07003582 tcpu = rflow->cpu;
3583
Tom Herbertfec5e652010-04-16 16:01:27 -07003584 /*
3585 * If the desired CPU (where last recvmsg was done) is
3586 * different from current CPU (one in the rx-queue flow
3587 * table entry), switch if one of the following holds:
Eric Dumazeta31196b2015-04-25 09:35:24 -07003588 * - Current CPU is unset (>= nr_cpu_ids).
Tom Herbertfec5e652010-04-16 16:01:27 -07003589 * - Current CPU is offline.
3590 * - The current CPU's queue tail has advanced beyond the
3591 * last packet that was enqueued using this table entry.
3592 * This guarantees that all previous packets for the flow
3593 * have been dequeued, thus preserving in order delivery.
3594 */
3595 if (unlikely(tcpu != next_cpu) &&
Eric Dumazeta31196b2015-04-25 09:35:24 -07003596 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
Tom Herbertfec5e652010-04-16 16:01:27 -07003597 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003598 rflow->last_qtail)) >= 0)) {
3599 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003600 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003601 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003602
Eric Dumazeta31196b2015-04-25 09:35:24 -07003603 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003604 *rflowp = rflow;
3605 cpu = tcpu;
3606 goto done;
3607 }
3608 }
3609
Eric Dumazet567e4b72015-02-06 12:59:01 -08003610try_rps:
3611
Tom Herbert0a9627f2010-03-16 08:03:29 +00003612 if (map) {
Daniel Borkmann8fc54f62014-08-23 20:58:54 +02003613 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003614 if (cpu_online(tcpu)) {
3615 cpu = tcpu;
3616 goto done;
3617 }
3618 }
3619
3620done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003621 return cpu;
3622}
3623
Ben Hutchingsc4454772011-01-19 11:03:53 +00003624#ifdef CONFIG_RFS_ACCEL
3625
3626/**
3627 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3628 * @dev: Device on which the filter was set
3629 * @rxq_index: RX queue index
3630 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3631 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3632 *
3633 * Drivers that implement ndo_rx_flow_steer() should periodically call
3634 * this function for each installed filter and remove the filters for
3635 * which it returns %true.
3636 */
3637bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3638 u32 flow_id, u16 filter_id)
3639{
3640 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3641 struct rps_dev_flow_table *flow_table;
3642 struct rps_dev_flow *rflow;
3643 bool expire = true;
Eric Dumazeta31196b2015-04-25 09:35:24 -07003644 unsigned int cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003645
3646 rcu_read_lock();
3647 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3648 if (flow_table && flow_id <= flow_table->mask) {
3649 rflow = &flow_table->flows[flow_id];
3650 cpu = ACCESS_ONCE(rflow->cpu);
Eric Dumazeta31196b2015-04-25 09:35:24 -07003651 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
Ben Hutchingsc4454772011-01-19 11:03:53 +00003652 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3653 rflow->last_qtail) <
3654 (int)(10 * flow_table->mask)))
3655 expire = false;
3656 }
3657 rcu_read_unlock();
3658 return expire;
3659}
3660EXPORT_SYMBOL(rps_may_expire_flow);
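
/* Illustrative sketch (not part of this file): a driver's periodic
 * housekeeping walks its installed filters and tears down stale ones;
 * the filter table and my_remove_filter() are hypothetical.
 *
 *	for (i = 0; i < f->count; i++) {
 *		if (f->in_use[i] &&
 *		    rps_may_expire_flow(dev, f->rxq[i], f->flow_id[i], i))
 *			my_remove_filter(f, i);
 *	}
 */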
3661
3662#endif /* CONFIG_RFS_ACCEL */
3663
Tom Herbert0a9627f2010-03-16 08:03:29 +00003664/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003665static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003666{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003667 struct softnet_data *sd = data;
3668
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003669 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003670 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003671}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003672
Tom Herbertfec5e652010-04-16 16:01:27 -07003673#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003674
3675/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003676 * Check if this softnet_data structure belongs to another CPU.
3677 * If yes, queue it to our IPI list and return 1;
3678 * if no, return 0.
3679 */
3680static int rps_ipi_queued(struct softnet_data *sd)
3681{
3682#ifdef CONFIG_RPS
Christoph Lameter903ceff2014-08-17 12:30:35 -05003683 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003684
3685 if (sd != mysd) {
3686 sd->rps_ipi_next = mysd->rps_ipi_list;
3687 mysd->rps_ipi_list = sd;
3688
3689 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3690 return 1;
3691 }
3692#endif /* CONFIG_RPS */
3693 return 0;
3694}
3695
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003696#ifdef CONFIG_NET_FLOW_LIMIT
3697int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3698#endif
3699
3700static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3701{
3702#ifdef CONFIG_NET_FLOW_LIMIT
3703 struct sd_flow_limit *fl;
3704 struct softnet_data *sd;
3705 unsigned int old_flow, new_flow;
3706
3707 if (qlen < (netdev_max_backlog >> 1))
3708 return false;
3709
Christoph Lameter903ceff2014-08-17 12:30:35 -05003710 sd = this_cpu_ptr(&softnet_data);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003711
3712 rcu_read_lock();
3713 fl = rcu_dereference(sd->flow_limit);
3714 if (fl) {
Tom Herbert3958afa1b2013-12-15 22:12:06 -08003715 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003716 old_flow = fl->history[fl->history_head];
3717 fl->history[fl->history_head] = new_flow;
3718
3719 fl->history_head++;
3720 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3721
3722 if (likely(fl->buckets[old_flow]))
3723 fl->buckets[old_flow]--;
3724
3725 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3726 fl->count++;
3727 rcu_read_unlock();
3728 return true;
3729 }
3730 }
3731 rcu_read_unlock();
3732#endif
3733 return false;
3734}
3735
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003736/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003737 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3738 * queue (may be a remote CPU queue).
3739 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003740static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3741 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003742{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003743 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003744 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003745 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003746
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003747 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003748
3749 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003750
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003751 rps_lock(sd);
Julian Anastasove9e4dd32015-07-09 09:59:09 +03003752 if (!netif_running(skb->dev))
3753 goto drop;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003754 qlen = skb_queue_len(&sd->input_pkt_queue);
3755 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Li RongQinge008f3f2014-12-08 09:42:55 +08003756 if (qlen) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003757enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003758 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003759 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003760 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003761 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003762 return NET_RX_SUCCESS;
3763 }
3764
Eric Dumazetebda37c22010-05-06 23:51:21 +00003765 /* Schedule NAPI for the backlog device.
3766 * We can use a non-atomic operation since we own the queue lock.
3767 */
3768 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003769 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003770 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003771 }
3772 goto enqueue;
3773 }
3774
Julian Anastasove9e4dd32015-07-09 09:59:09 +03003775drop:
Changli Gaodee42872010-05-02 05:42:16 +00003776 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003777 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003778
Tom Herbert0a9627f2010-03-16 08:03:29 +00003779 local_irq_restore(flags);
3780
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003781 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003782 kfree_skb(skb);
3783 return NET_RX_DROP;
3784}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003785
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003786static int netif_rx_internal(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003788 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789
Eric Dumazet588f0332011-11-15 04:12:55 +00003790 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791
Koki Sanagicf66ba52010-08-23 18:45:02 +09003792 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003793#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003794 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003795 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003796 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797
Changli Gaocece1942010-08-07 20:35:43 -07003798 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003799 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003800
3801 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003802 if (cpu < 0)
3803 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003804
3805 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3806
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003807 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003808 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003809 } else
3810#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003811 {
3812 unsigned int qtail;
tchardingf4563a72017-02-09 17:56:07 +11003813
Tom Herbertfec5e652010-04-16 16:01:27 -07003814 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3815 put_cpu();
3816 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003817 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003819
3820/**
3821 * netif_rx - post buffer to the network code
3822 * @skb: buffer to post
3823 *
3824 * This function receives a packet from a device driver and queues it for
3825 * the upper (protocol) levels to process. It always succeeds. The buffer
3826 * may be dropped during processing for congestion control or by the
3827 * protocol layers.
3828 *
 3829 * Return values:

3830 * NET_RX_SUCCESS (no congestion)
3831 * NET_RX_DROP (packet was dropped)
3832 *
3833 */
3834
3835int netif_rx(struct sk_buff *skb)
3836{
3837 trace_netif_rx_entry(skb);
3838
3839 return netif_rx_internal(skb);
3840}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003841EXPORT_SYMBOL(netif_rx);
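/*
 * Illustrative sketch of a caller (not part of this file): an
 * interrupt-driven driver copying a received frame out of its ring and
 * handing it to netif_rx(). "mydrv_rx_frame" and its arguments are
 * hypothetical.
 */
#if 0
static void mydrv_rx_frame(struct net_device *dev, const void *buf, int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb)
		return;	/* drop; a real driver would bump rx_dropped */
	memcpy(skb_put(skb, len), buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}
#endif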
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842
3843int netif_rx_ni(struct sk_buff *skb)
3844{
3845 int err;
3846
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003847 trace_netif_rx_ni_entry(skb);
3848
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849 preempt_disable();
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003850 err = netif_rx_internal(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003851 if (local_softirq_pending())
3852 do_softirq();
3853 preempt_enable();
3854
3855 return err;
3856}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003857EXPORT_SYMBOL(netif_rx_ni);
3858
Emese Revfy0766f782016-06-20 20:42:34 +02003859static __latent_entropy void net_tx_action(struct softirq_action *h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860{
Christoph Lameter903ceff2014-08-17 12:30:35 -05003861 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003862
3863 if (sd->completion_queue) {
3864 struct sk_buff *clist;
3865
3866 local_irq_disable();
3867 clist = sd->completion_queue;
3868 sd->completion_queue = NULL;
3869 local_irq_enable();
3870
3871 while (clist) {
3872 struct sk_buff *skb = clist;
tchardingf4563a72017-02-09 17:56:07 +11003873
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874 clist = clist->next;
3875
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003876 WARN_ON(atomic_read(&skb->users));
Eric Dumazete6247022013-12-05 04:45:08 -08003877 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3878 trace_consume_skb(skb);
3879 else
3880 trace_kfree_skb(skb, net_tx_action);
Jesper Dangaard Brouer15fad712016-02-08 13:15:04 +01003881
3882 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
3883 __kfree_skb(skb);
3884 else
3885 __kfree_skb_defer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886 }
Jesper Dangaard Brouer15fad712016-02-08 13:15:04 +01003887
3888 __kfree_skb_flush();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889 }
3890
3891 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003892 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893
3894 local_irq_disable();
3895 head = sd->output_queue;
3896 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003897 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898 local_irq_enable();
3899
3900 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003901 struct Qdisc *q = head;
3902 spinlock_t *root_lock;
3903
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904 head = head->next_sched;
3905
David S. Miller5fb66222008-08-02 20:02:43 -07003906 root_lock = qdisc_lock(q);
Eric Dumazet3bcb8462016-06-04 20:02:28 -07003907 spin_lock(root_lock);
3908 /* We need to make sure head->next_sched is read
3909 * before clearing __QDISC_STATE_SCHED
3910 */
3911 smp_mb__before_atomic();
3912 clear_bit(__QDISC_STATE_SCHED, &q->state);
3913 qdisc_run(q);
3914 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003915 }
3916 }
3917}
3918
Javier Martinez Canillas181402a2016-09-09 08:43:15 -04003919#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
Michał Mirosławda678292009-06-05 05:35:28 +00003920/* This hook is defined here for ATM LANE */
3921int (*br_fdb_test_addr_hook)(struct net_device *dev,
3922 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003923EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003924#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003926static inline struct sk_buff *
3927sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
3928 struct net_device *orig_dev)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003929{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02003930#ifdef CONFIG_NET_CLS_ACT
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003931 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3932 struct tcf_result cl_res;
Eric Dumazet24824a02010-10-02 06:11:55 +00003933
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003934 /* If there's at least one ingress present somewhere (so
 3935	 * we get here via the enabled static key), remaining devices
3936 * that are not configured with an ingress qdisc will bail
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003937 * out here.
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003938 */
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003939 if (!cl)
Daniel Borkmann45771392015-04-10 23:07:54 +02003940 return skb;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003941 if (*pt_prev) {
3942 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3943 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003944 }
3945
Florian Westphal33654952015-05-14 00:36:28 +02003946 qdisc_skb_cb(skb)->pkt_len = skb->len;
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05003947 skb->tc_at_ingress = 1;
Eric Dumazet24ea5912015-07-06 05:18:03 -07003948 qdisc_bstats_cpu_update(cl->q, skb);
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003949
Daniel Borkmann3b3ae882015-08-26 23:00:06 +02003950 switch (tc_classify(skb, cl, &cl_res, false)) {
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003951 case TC_ACT_OK:
3952 case TC_ACT_RECLASSIFY:
3953 skb->tc_index = TC_H_MIN(cl_res.classid);
3954 break;
3955 case TC_ACT_SHOT:
Eric Dumazet24ea5912015-07-06 05:18:03 -07003956 qdisc_qstats_cpu_drop(cl->q);
Eric Dumazet8a3a4c62016-05-06 15:55:50 -07003957 kfree_skb(skb);
3958 return NULL;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003959 case TC_ACT_STOLEN:
3960 case TC_ACT_QUEUED:
Eric Dumazet8a3a4c62016-05-06 15:55:50 -07003961 consume_skb(skb);
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003962 return NULL;
Alexei Starovoitov27b29f62015-09-15 23:05:43 -07003963 case TC_ACT_REDIRECT:
3964 /* skb_mac_header check was done by cls/act_bpf, so
3965 * we can safely push the L2 header back before
3966 * redirecting to another netdev
3967 */
3968 __skb_push(skb, skb->mac_len);
3969 skb_do_redirect(skb);
3970 return NULL;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003971 default:
3972 break;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003973 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02003974#endif /* CONFIG_NET_CLS_ACT */
Herbert Xuf697c3e2007-10-14 00:38:47 -07003975 return skb;
3976}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003978/**
Mahesh Bandewar24b27fc2016-09-01 22:18:34 -07003979 * netdev_is_rx_handler_busy - check if receive handler is registered
3980 * @dev: device to check
3981 *
3982 * Check if a receive handler is already registered for a given device.
 3983 * Return true if there is one.
3984 *
3985 * The caller must hold the rtnl_mutex.
3986 */
3987bool netdev_is_rx_handler_busy(struct net_device *dev)
3988{
3989 ASSERT_RTNL();
3990 return dev && rtnl_dereference(dev->rx_handler);
3991}
3992EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
3993
3994/**
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003995 * netdev_rx_handler_register - register receive handler
3996 * @dev: device to register a handler for
3997 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003998 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003999 *
Masanari Iidae2278672014-02-18 22:54:36 +09004000 * Register a receive handler for a device. This handler will then be
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004001 * called from __netif_receive_skb. A negative errno code is returned
4002 * on a failure.
4003 *
4004 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004005 *
4006 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004007 */
4008int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00004009 rx_handler_func_t *rx_handler,
4010 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004011{
Mahesh Bandewar1b7cd002017-01-18 15:02:49 -08004012 if (netdev_is_rx_handler_busy(dev))
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004013 return -EBUSY;
4014
Eric Dumazet00cfec32013-03-29 03:01:22 +00004015 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00004016 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004017 rcu_assign_pointer(dev->rx_handler, rx_handler);
4018
4019 return 0;
4020}
4021EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
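/*
 * Illustrative sketch: a minimal rx_handler in the style of bridge or
 * macvlan ports. "my_port_handle_frame", "my_port_attach" and
 * "port_priv" are hypothetical.
 */
#if 0
static rx_handler_result_t my_port_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	/* Inspect, mangle or steal skb here. RX_HANDLER_PASS lets
	 * normal protocol delivery continue.
	 */
	return RX_HANDLER_PASS;
}

static int my_port_attach(struct net_device *dev, void *port_priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, my_port_handle_frame,
					 port_priv);
	rtnl_unlock();
	return err;
}
#endif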
4022
4023/**
4024 * netdev_rx_handler_unregister - unregister receive handler
4025 * @dev: device to unregister a handler from
4026 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00004027 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004028 *
4029 * The caller must hold the rtnl_mutex.
4030 */
4031void netdev_rx_handler_unregister(struct net_device *dev)
4032{
4033
4034 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00004035 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00004036	/* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
	4037	 * section is guaranteed to see a non-NULL rx_handler_data
4038 * as well.
4039 */
4040 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00004041 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004042}
4043EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4044
Mel Gormanb4b9e352012-07-31 16:44:26 -07004045/*
4046 * Limit the use of PFMEMALLOC reserves to those protocols that implement
4047 * the special handling of PFMEMALLOC skbs.
4048 */
4049static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4050{
4051 switch (skb->protocol) {
Joe Perches2b8837a2014-03-12 10:04:17 -07004052 case htons(ETH_P_ARP):
4053 case htons(ETH_P_IP):
4054 case htons(ETH_P_IPV6):
4055 case htons(ETH_P_8021Q):
4056 case htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07004057 return true;
4058 default:
4059 return false;
4060 }
4061}
4062
Pablo Neirae687ad62015-05-13 18:19:38 +02004063static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4064 int *ret, struct net_device *orig_dev)
4065{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004066#ifdef CONFIG_NETFILTER_INGRESS
Pablo Neirae687ad62015-05-13 18:19:38 +02004067 if (nf_hook_ingress_active(skb)) {
Aaron Conole2c1e2702016-09-21 11:35:03 -04004068 int ingress_retval;
4069
Pablo Neirae687ad62015-05-13 18:19:38 +02004070 if (*pt_prev) {
4071 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4072 *pt_prev = NULL;
4073 }
4074
Aaron Conole2c1e2702016-09-21 11:35:03 -04004075 rcu_read_lock();
4076 ingress_retval = nf_hook_ingress(skb);
4077 rcu_read_unlock();
4078 return ingress_retval;
Pablo Neirae687ad62015-05-13 18:19:38 +02004079 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004080#endif /* CONFIG_NETFILTER_INGRESS */
Pablo Neirae687ad62015-05-13 18:19:38 +02004081 return 0;
4082}
Pablo Neirae687ad62015-05-13 18:19:38 +02004083
David S. Miller9754e292013-02-14 15:57:38 -05004084static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004085{
4086 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004087 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07004088 struct net_device *orig_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004089 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090 int ret = NET_RX_DROP;
Al Viro252e33462006-11-14 20:48:11 -08004091 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004092
Eric Dumazet588f0332011-11-15 04:12:55 +00004093 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07004094
Koki Sanagicf66ba52010-08-23 18:45:02 +09004095 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08004096
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07004097 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00004098
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07004099 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00004100 if (!skb_transport_header_was_set(skb))
4101 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00004102 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103
4104 pt_prev = NULL;
4105
David S. Miller63d8ea72011-02-28 10:48:59 -08004106another_round:
David S. Millerb6858172012-07-23 16:27:54 -07004107 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08004108
4109 __this_cpu_inc(softnet_data.processed);
4110
Patrick McHardy8ad227f2013-04-19 02:04:31 +00004111 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4112 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04004113 skb = skb_vlan_untag(skb);
Jiri Pirkobcc6d472011-04-07 19:48:33 +00004114 if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03004115 goto out;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00004116 }
4117
Willem de Bruijne7246e12017-01-07 17:06:35 -05004118 if (skb_skip_tc_classify(skb))
4119 goto skip_classify;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004120
David S. Miller9754e292013-02-14 15:57:38 -05004121 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07004122 goto skip_taps;
4123
Linus Torvalds1da177e2005-04-16 15:20:36 -07004124 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Salam Noureddine7866a622015-01-27 11:35:48 -08004125 if (pt_prev)
4126 ret = deliver_skb(skb, pt_prev, orig_dev);
4127 pt_prev = ptype;
4128 }
4129
4130 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4131 if (pt_prev)
4132 ret = deliver_skb(skb, pt_prev, orig_dev);
4133 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004134 }
4135
Mel Gormanb4b9e352012-07-31 16:44:26 -07004136skip_taps:
Pablo Neira1cf519002015-05-13 18:19:37 +02004137#ifdef CONFIG_NET_INGRESS
Daniel Borkmann45771392015-04-10 23:07:54 +02004138 if (static_key_false(&ingress_needed)) {
Daniel Borkmann1f211a12016-01-07 22:29:47 +01004139 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
Daniel Borkmann45771392015-04-10 23:07:54 +02004140 if (!skb)
Julian Anastasov2c17d272015-07-09 09:59:10 +03004141 goto out;
Pablo Neirae687ad62015-05-13 18:19:38 +02004142
4143 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
Julian Anastasov2c17d272015-07-09 09:59:10 +03004144 goto out;
Daniel Borkmann45771392015-04-10 23:07:54 +02004145 }
Pablo Neira1cf519002015-05-13 18:19:37 +02004146#endif
Willem de Bruijna5135bc2017-01-07 17:06:36 -05004147 skb_reset_tc(skb);
Willem de Bruijne7246e12017-01-07 17:06:35 -05004148skip_classify:
David S. Miller9754e292013-02-14 15:57:38 -05004149 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07004150 goto drop;
4151
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004152 if (skb_vlan_tag_present(skb)) {
John Fastabend24257172011-10-10 09:16:41 +00004153 if (pt_prev) {
4154 ret = deliver_skb(skb, pt_prev, orig_dev);
4155 pt_prev = NULL;
4156 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004157 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00004158 goto another_round;
4159 else if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03004160 goto out;
John Fastabend24257172011-10-10 09:16:41 +00004161 }
4162
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004163 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004164 if (rx_handler) {
4165 if (pt_prev) {
4166 ret = deliver_skb(skb, pt_prev, orig_dev);
4167 pt_prev = NULL;
4168 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004169 switch (rx_handler(&skb)) {
4170 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00004171 ret = NET_RX_SUCCESS;
Julian Anastasov2c17d272015-07-09 09:59:10 +03004172 goto out;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004173 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08004174 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00004175 case RX_HANDLER_EXACT:
4176 deliver_exact = true;
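			/* fall through */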
4177 case RX_HANDLER_PASS:
4178 break;
4179 default:
4180 BUG();
4181 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004182 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01004184 if (unlikely(skb_vlan_tag_present(skb))) {
4185 if (skb_vlan_tag_get_id(skb))
Eric Dumazetd4b812d2013-07-18 07:19:26 -07004186 skb->pkt_type = PACKET_OTHERHOST;
4187 /* Note: we might in the future use prio bits
 4188		 * and set skb->priority like in vlan_do_receive().
	4189		 * For the time being, just ignore the Priority Code Point
4190 */
4191 skb->vlan_tci = 0;
4192 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00004193
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194 type = skb->protocol;
Salam Noureddine7866a622015-01-27 11:35:48 -08004195
4196 /* deliver only exact match when indicated */
4197 if (likely(!deliver_exact)) {
4198 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4199 &ptype_base[ntohs(type) &
4200 PTYPE_HASH_MASK]);
4201 }
4202
4203 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4204 &orig_dev->ptype_specific);
4205
4206 if (unlikely(skb->dev != orig_dev)) {
4207 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4208 &skb->dev->ptype_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004209 }
4210
4211 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00004212 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00004213 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00004214 else
4215 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004216 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07004217drop:
Jarod Wilson6e7333d2016-02-01 18:51:05 -05004218 if (!deliver_exact)
4219 atomic_long_inc(&skb->dev->rx_dropped);
4220 else
4221 atomic_long_inc(&skb->dev->rx_nohandler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222 kfree_skb(skb);
 4223		/* Jamal, now you will not be able to escape explaining
	4224		 * to me how you were going to use this. :-)
4225 */
4226 ret = NET_RX_DROP;
4227 }
4228
Julian Anastasov2c17d272015-07-09 09:59:10 +03004229out:
David S. Miller9754e292013-02-14 15:57:38 -05004230 return ret;
4231}
4232
4233static int __netif_receive_skb(struct sk_buff *skb)
4234{
4235 int ret;
4236
4237 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
4238 unsigned long pflags = current->flags;
4239
4240 /*
4241 * PFMEMALLOC skbs are special, they should
4242 * - be delivered to SOCK_MEMALLOC sockets only
4243 * - stay away from userspace
4244 * - have bounded memory usage
4245 *
4246 * Use PF_MEMALLOC as this saves us from propagating the allocation
4247 * context down to all allocation sites.
4248 */
4249 current->flags |= PF_MEMALLOC;
4250 ret = __netif_receive_skb_core(skb, true);
NeilBrown717a94b2017-04-07 10:03:26 +10004251 current_restore_flags(pflags, PF_MEMALLOC);
David S. Miller9754e292013-02-14 15:57:38 -05004252 } else
4253 ret = __netif_receive_skb_core(skb, false);
4254
Linus Torvalds1da177e2005-04-16 15:20:36 -07004255 return ret;
4256}
Tom Herbert0a9627f2010-03-16 08:03:29 +00004257
David S. Millerb5cdae32017-04-18 15:36:58 -04004258static struct static_key generic_xdp_needed __read_mostly;
4259
4260static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
4261{
4262 struct bpf_prog *new = xdp->prog;
4263 int ret = 0;
4264
4265 switch (xdp->command) {
4266 case XDP_SETUP_PROG: {
4267 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
4268
4269 rcu_assign_pointer(dev->xdp_prog, new);
4270 if (old)
4271 bpf_prog_put(old);
4272
4273 if (old && !new) {
4274 static_key_slow_dec(&generic_xdp_needed);
4275 } else if (new && !old) {
4276 static_key_slow_inc(&generic_xdp_needed);
4277 dev_disable_lro(dev);
4278 }
4279 break;
4280 }
4281
4282 case XDP_QUERY_PROG:
4283 xdp->prog_attached = !!rcu_access_pointer(dev->xdp_prog);
4284 break;
4285
4286 default:
4287 ret = -EINVAL;
4288 break;
4289 }
4290
4291 return ret;
4292}
4293
4294static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4295 struct bpf_prog *xdp_prog)
4296{
4297 struct xdp_buff xdp;
4298 u32 act = XDP_DROP;
4299 void *orig_data;
4300 int hlen, off;
4301 u32 mac_len;
4302
4303 /* Reinjected packets coming from act_mirred or similar should
4304 * not get XDP generic processing.
4305 */
4306 if (skb_cloned(skb))
4307 return XDP_PASS;
4308
4309 if (skb_linearize(skb))
4310 goto do_drop;
4311
4312 /* The XDP program wants to see the packet starting at the MAC
4313 * header.
4314 */
4315 mac_len = skb->data - skb_mac_header(skb);
4316 hlen = skb_headlen(skb) + mac_len;
4317 xdp.data = skb->data - mac_len;
4318 xdp.data_end = xdp.data + hlen;
4319 xdp.data_hard_start = skb->data - skb_headroom(skb);
4320 orig_data = xdp.data;
4321
4322 act = bpf_prog_run_xdp(xdp_prog, &xdp);
4323
4324 off = xdp.data - orig_data;
4325 if (off > 0)
4326 __skb_pull(skb, off);
4327 else if (off < 0)
4328 __skb_push(skb, -off);
4329
4330 switch (act) {
4331 case XDP_TX:
4332 __skb_push(skb, mac_len);
4333 /* fall through */
4334 case XDP_PASS:
4335 break;
4336
4337 default:
4338 bpf_warn_invalid_xdp_action(act);
4339 /* fall through */
4340 case XDP_ABORTED:
4341 trace_xdp_exception(skb->dev, xdp_prog, act);
4342 /* fall through */
4343 case XDP_DROP:
4344 do_drop:
4345 kfree_skb(skb);
4346 break;
4347 }
4348
4349 return act;
4350}
4351
4352/* When doing generic XDP we have to bypass the qdisc layer and the
4353 * network taps in order to match in-driver-XDP behavior.
4354 */
4355static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4356{
4357 struct net_device *dev = skb->dev;
4358 struct netdev_queue *txq;
4359 bool free_skb = true;
4360 int cpu, rc;
4361
4362 txq = netdev_pick_tx(dev, skb, NULL);
4363 cpu = smp_processor_id();
4364 HARD_TX_LOCK(dev, txq, cpu);
4365 if (!netif_xmit_stopped(txq)) {
4366 rc = netdev_start_xmit(skb, dev, txq, 0);
4367 if (dev_xmit_complete(rc))
4368 free_skb = false;
4369 }
4370 HARD_TX_UNLOCK(dev, txq);
4371 if (free_skb) {
4372 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4373 kfree_skb(skb);
4374 }
4375}
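/*
 * Illustrative sketch: the sort of program the generic XDP path above
 * runs. This is restricted BPF C, built separately with clang -target bpf
 * and attached via the XDP_SETUP_PROG command; the program name is
 * hypothetical.
 */
#if 0
SEC("xdp")
int xdp_drop_runts(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop frames too short to hold an Ethernet header. */
	if (data + ETH_HLEN > data_end)
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
#endif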
4376
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004377static int netif_receive_skb_internal(struct sk_buff *skb)
Tom Herbert0a9627f2010-03-16 08:03:29 +00004378{
Julian Anastasov2c17d272015-07-09 09:59:10 +03004379 int ret;
4380
Eric Dumazet588f0332011-11-15 04:12:55 +00004381 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07004382
Richard Cochranc1f19b52010-07-17 08:49:36 +00004383 if (skb_defer_rx_timestamp(skb))
4384 return NET_RX_SUCCESS;
4385
Julian Anastasov2c17d272015-07-09 09:59:10 +03004386 rcu_read_lock();
4387
David S. Millerb5cdae32017-04-18 15:36:58 -04004388 if (static_key_false(&generic_xdp_needed)) {
4389 struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog);
4390
4391 if (xdp_prog) {
4392 u32 act = netif_receive_generic_xdp(skb, xdp_prog);
4393
4394 if (act != XDP_PASS) {
4395 rcu_read_unlock();
4396 if (act == XDP_TX)
4397 generic_xdp_tx(skb, xdp_prog);
4398 return NET_RX_DROP;
4399 }
4400 }
4401 }
4402
Eric Dumazetdf334542010-03-24 19:13:54 +00004403#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01004404 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07004405 struct rps_dev_flow voidflow, *rflow = &voidflow;
Julian Anastasov2c17d272015-07-09 09:59:10 +03004406 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07004407
Eric Dumazet3b098e22010-05-15 23:57:10 -07004408 if (cpu >= 0) {
4409 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4410 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00004411 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07004412 }
Tom Herbertfec5e652010-04-16 16:01:27 -07004413 }
Tom Herbert1e94d722010-03-18 17:45:44 -07004414#endif
Julian Anastasov2c17d272015-07-09 09:59:10 +03004415 ret = __netif_receive_skb(skb);
4416 rcu_read_unlock();
4417 return ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004418}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004419
4420/**
4421 * netif_receive_skb - process receive buffer from network
4422 * @skb: buffer to process
4423 *
4424 * netif_receive_skb() is the main receive data processing function.
4425 * It always succeeds. The buffer may be dropped during processing
4426 * for congestion control or by the protocol layers.
4427 *
4428 * This function may only be called from softirq context and interrupts
4429 * should be enabled.
4430 *
4431 * Return values (usually ignored):
4432 * NET_RX_SUCCESS: no congestion
4433 * NET_RX_DROP: packet was dropped
4434 */
Eric W. Biederman04eb4482015-09-15 20:04:15 -05004435int netif_receive_skb(struct sk_buff *skb)
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004436{
4437 trace_netif_receive_skb_entry(skb);
4438
4439 return netif_receive_skb_internal(skb);
4440}
Eric W. Biederman04eb4482015-09-15 20:04:15 -05004441EXPORT_SYMBOL(netif_receive_skb);
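/*
 * Illustrative sketch of the calling context: a NAPI poll routine
 * delivering frames with netif_receive_skb(). "mydrv_priv" and
 * "mydrv_next_rx_skb" are hypothetical.
 */
#if 0
static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = mydrv_next_rx_skb(priv);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, napi->dev);
		netif_receive_skb(skb);
		done++;
	}
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}
#endif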
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442
Eric Dumazet41852492016-08-26 12:50:39 -07004443DEFINE_PER_CPU(struct work_struct, flush_works);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004444
 4445/* Network device is going away; flush any packets still pending */
4446static void flush_backlog(struct work_struct *work)
4447{
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004448 struct sk_buff *skb, *tmp;
4449 struct softnet_data *sd;
4450
4451 local_bh_disable();
4452 sd = this_cpu_ptr(&softnet_data);
4453
4454 local_irq_disable();
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004455 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004456 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Eric Dumazet41852492016-08-26 12:50:39 -07004457 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004458 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004459 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004460 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004461 }
Changli Gao6e7676c2010-04-27 15:07:33 -07004462 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004463 rps_unlock(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004464 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004465
4466 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
Eric Dumazet41852492016-08-26 12:50:39 -07004467 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
Changli Gao6e7676c2010-04-27 15:07:33 -07004468 __skb_unlink(skb, &sd->process_queue);
4469 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004470 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004471 }
4472 }
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004473 local_bh_enable();
4474}
4475
Eric Dumazet41852492016-08-26 12:50:39 -07004476static void flush_all_backlogs(void)
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004477{
4478 unsigned int cpu;
4479
4480 get_online_cpus();
4481
Eric Dumazet41852492016-08-26 12:50:39 -07004482 for_each_online_cpu(cpu)
4483 queue_work_on(cpu, system_highpri_wq,
4484 per_cpu_ptr(&flush_works, cpu));
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004485
4486 for_each_online_cpu(cpu)
Eric Dumazet41852492016-08-26 12:50:39 -07004487 flush_work(per_cpu_ptr(&flush_works, cpu));
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004488
4489 put_online_cpus();
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004490}
4491
Herbert Xud565b0a2008-12-15 23:38:52 -08004492static int napi_gro_complete(struct sk_buff *skb)
4493{
Vlad Yasevich22061d82012-11-15 08:49:11 +00004494 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004495 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004496 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08004497 int err = -ENOENT;
4498
Eric Dumazetc3c7c252012-12-06 13:54:59 +00004499 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4500
Herbert Xufc59f9a2009-04-14 15:11:06 -07004501 if (NAPI_GRO_CB(skb)->count == 1) {
4502 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004503 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07004504 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004505
4506 rcu_read_lock();
4507 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004508 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08004509 continue;
4510
Jerry Chu299603e82013-12-11 20:53:45 -08004511 err = ptype->callbacks.gro_complete(skb, 0);
Herbert Xud565b0a2008-12-15 23:38:52 -08004512 break;
4513 }
4514 rcu_read_unlock();
4515
4516 if (err) {
4517 WARN_ON(&ptype->list == head);
4518 kfree_skb(skb);
4519 return NET_RX_SUCCESS;
4520 }
4521
4522out:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004523 return netif_receive_skb_internal(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004524}
4525
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004526/* napi->gro_list contains packets ordered by age.
 4527 * The youngest packets are at its head.
4528 * Complete skbs in reverse order to reduce latencies.
4529 */
4530void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08004531{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004532 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004533
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004534 /* scan list and build reverse chain */
4535 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4536 skb->prev = prev;
4537 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08004538 }
4539
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004540 for (skb = prev; skb; skb = prev) {
4541 skb->next = NULL;
4542
4543 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4544 return;
4545
4546 prev = skb->prev;
4547 napi_gro_complete(skb);
4548 napi->gro_count--;
4549 }
4550
Herbert Xud565b0a2008-12-15 23:38:52 -08004551 napi->gro_list = NULL;
4552}
Eric Dumazet86cac582010-08-31 18:25:32 +00004553EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08004554
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004555static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4556{
4557 struct sk_buff *p;
4558 unsigned int maclen = skb->dev->hard_header_len;
Tom Herbert0b4cec82014-01-15 08:58:06 -08004559 u32 hash = skb_get_hash_raw(skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004560
4561 for (p = napi->gro_list; p; p = p->next) {
4562 unsigned long diffs;
4563
Tom Herbert0b4cec82014-01-15 08:58:06 -08004564 NAPI_GRO_CB(p)->flush = 0;
4565
4566 if (hash != skb_get_hash_raw(p)) {
4567 NAPI_GRO_CB(p)->same_flow = 0;
4568 continue;
4569 }
4570
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004571 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4572 diffs |= p->vlan_tci ^ skb->vlan_tci;
Jesse Grossce87fc62016-01-20 17:59:49 -08004573 diffs |= skb_metadata_dst_cmp(p, skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004574 if (maclen == ETH_HLEN)
4575 diffs |= compare_ether_header(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07004576 skb_mac_header(skb));
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004577 else if (!diffs)
4578 diffs = memcmp(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07004579 skb_mac_header(skb),
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004580 maclen);
4581 NAPI_GRO_CB(p)->same_flow = !diffs;
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004582 }
4583}
4584
Jerry Chu299603e82013-12-11 20:53:45 -08004585static void skb_gro_reset_offset(struct sk_buff *skb)
4586{
4587 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4588 const skb_frag_t *frag0 = &pinfo->frags[0];
4589
4590 NAPI_GRO_CB(skb)->data_offset = 0;
4591 NAPI_GRO_CB(skb)->frag0 = NULL;
4592 NAPI_GRO_CB(skb)->frag0_len = 0;
4593
4594 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4595 pinfo->nr_frags &&
4596 !PageHighMem(skb_frag_page(frag0))) {
4597 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
Eric Dumazet7cfd5fd2017-01-10 19:52:43 -08004598 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
4599 skb_frag_size(frag0),
4600 skb->end - skb->tail);
Herbert Xud565b0a2008-12-15 23:38:52 -08004601 }
4602}
4603
Eric Dumazeta50e2332014-03-29 21:28:21 -07004604static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4605{
4606 struct skb_shared_info *pinfo = skb_shinfo(skb);
4607
4608 BUG_ON(skb->end - skb->tail < grow);
4609
4610 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4611
4612 skb->data_len -= grow;
4613 skb->tail += grow;
4614
4615 pinfo->frags[0].page_offset += grow;
4616 skb_frag_size_sub(&pinfo->frags[0], grow);
4617
4618 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4619 skb_frag_unref(skb, 0);
4620 memmove(pinfo->frags, pinfo->frags + 1,
4621 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4622 }
4623}
4624
Rami Rosenbb728822012-11-28 21:55:25 +00004625static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08004626{
4627 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004628 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004629 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004630 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004631 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004632 enum gro_result ret;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004633 int grow;
Herbert Xud565b0a2008-12-15 23:38:52 -08004634
David S. Millerb5cdae32017-04-18 15:36:58 -04004635 if (netif_elide_gro(skb->dev))
Herbert Xud565b0a2008-12-15 23:38:52 -08004636 goto normal;
4637
Eric Dumazetd61d0722016-11-07 11:12:27 -08004638 if (skb->csum_bad)
Herbert Xuf17f5c92009-01-14 14:36:12 -08004639 goto normal;
4640
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004641 gro_list_prepare(napi, skb);
4642
Herbert Xud565b0a2008-12-15 23:38:52 -08004643 rcu_read_lock();
4644 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004645 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08004646 continue;
4647
Herbert Xu86911732009-01-29 14:19:50 +00004648 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00004649 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004650 NAPI_GRO_CB(skb)->same_flow = 0;
Eric Dumazetd61d0722016-11-07 11:12:27 -08004651 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
Herbert Xu5d38a072009-01-04 16:13:40 -08004652 NAPI_GRO_CB(skb)->free = 0;
Jesse Grossfac8e0f2016-03-19 09:32:01 -07004653 NAPI_GRO_CB(skb)->encap_mark = 0;
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02004654 NAPI_GRO_CB(skb)->recursion_counter = 0;
Alexander Duycka0ca1532016-04-05 09:13:39 -07004655 NAPI_GRO_CB(skb)->is_fou = 0;
Alexander Duyck15305452016-04-10 21:44:57 -04004656 NAPI_GRO_CB(skb)->is_atomic = 1;
Tom Herbert15e23962015-02-10 16:30:31 -08004657 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004658
Tom Herbert662880f2014-08-27 21:26:56 -07004659 /* Setup for GRO checksum validation */
4660 switch (skb->ip_summed) {
4661 case CHECKSUM_COMPLETE:
4662 NAPI_GRO_CB(skb)->csum = skb->csum;
4663 NAPI_GRO_CB(skb)->csum_valid = 1;
4664 NAPI_GRO_CB(skb)->csum_cnt = 0;
4665 break;
4666 case CHECKSUM_UNNECESSARY:
4667 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4668 NAPI_GRO_CB(skb)->csum_valid = 0;
4669 break;
4670 default:
4671 NAPI_GRO_CB(skb)->csum_cnt = 0;
4672 NAPI_GRO_CB(skb)->csum_valid = 0;
4673 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004674
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004675 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004676 break;
4677 }
4678 rcu_read_unlock();
4679
4680 if (&ptype->list == head)
4681 goto normal;
4682
Steffen Klassert25393d32017-02-15 09:39:44 +01004683 if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
4684 ret = GRO_CONSUMED;
4685 goto ok;
4686 }
4687
Herbert Xu0da2afd52008-12-26 14:57:42 -08004688 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004689 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004690
Herbert Xud565b0a2008-12-15 23:38:52 -08004691 if (pp) {
4692 struct sk_buff *nskb = *pp;
4693
4694 *pp = nskb->next;
4695 nskb->next = NULL;
4696 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00004697 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08004698 }
4699
Herbert Xu0da2afd52008-12-26 14:57:42 -08004700 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08004701 goto ok;
4702
Eric Dumazet600adc12014-01-09 14:12:19 -08004703 if (NAPI_GRO_CB(skb)->flush)
Herbert Xud565b0a2008-12-15 23:38:52 -08004704 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08004705
Eric Dumazet600adc12014-01-09 14:12:19 -08004706 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4707 struct sk_buff *nskb = napi->gro_list;
4708
4709 /* locate the end of the list to select the 'oldest' flow */
4710 while (nskb->next) {
4711 pp = &nskb->next;
4712 nskb = *pp;
4713 }
4714 *pp = NULL;
4715 nskb->next = NULL;
4716 napi_gro_complete(nskb);
4717 } else {
4718 napi->gro_count++;
4719 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004720 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004721 NAPI_GRO_CB(skb)->age = jiffies;
Eric Dumazet29e98242014-05-16 11:34:37 -07004722 NAPI_GRO_CB(skb)->last = skb;
Herbert Xu86911732009-01-29 14:19:50 +00004723 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004724 skb->next = napi->gro_list;
4725 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004726 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08004727
Herbert Xuad0f9902009-02-01 01:24:55 -08004728pull:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004729 grow = skb_gro_offset(skb) - skb_headlen(skb);
4730 if (grow > 0)
4731 gro_pull_from_frag0(skb, grow);
Herbert Xud565b0a2008-12-15 23:38:52 -08004732ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004733 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08004734
4735normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08004736 ret = GRO_NORMAL;
4737 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08004738}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004739
Jerry Chubf5a7552014-01-07 10:23:19 -08004740struct packet_offload *gro_find_receive_by_type(__be16 type)
4741{
4742 struct list_head *offload_head = &offload_base;
4743 struct packet_offload *ptype;
4744
4745 list_for_each_entry_rcu(ptype, offload_head, list) {
4746 if (ptype->type != type || !ptype->callbacks.gro_receive)
4747 continue;
4748 return ptype;
4749 }
4750 return NULL;
4751}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004752EXPORT_SYMBOL(gro_find_receive_by_type);
Jerry Chubf5a7552014-01-07 10:23:19 -08004753
4754struct packet_offload *gro_find_complete_by_type(__be16 type)
4755{
4756 struct list_head *offload_head = &offload_base;
4757 struct packet_offload *ptype;
4758
4759 list_for_each_entry_rcu(ptype, offload_head, list) {
4760 if (ptype->type != type || !ptype->callbacks.gro_complete)
4761 continue;
4762 return ptype;
4763 }
4764 return NULL;
4765}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004766EXPORT_SYMBOL(gro_find_complete_by_type);
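/*
 * Illustrative sketch: how a protocol wires itself into the GRO
 * machinery these lookups serve, modeled on the IPv4 offload
 * registration; the "myproto_" callbacks are hypothetical.
 */
#if 0
static struct packet_offload myproto_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.callbacks = {
		.gro_receive	= myproto_gro_receive,
		.gro_complete	= myproto_gro_complete,
	},
};

static int __init myproto_offload_init(void)
{
	dev_add_offload(&myproto_offload);
	return 0;
}
#endif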
Herbert Xu96e93ea2009-01-06 10:49:34 -08004767
Rami Rosenbb728822012-11-28 21:55:25 +00004768static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08004769{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004770 switch (ret) {
4771 case GRO_NORMAL:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004772 if (netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004773 ret = GRO_DROP;
4774 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004775
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004776 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08004777 kfree_skb(skb);
4778 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004779
Eric Dumazetdaa86542012-04-19 07:07:40 +00004780 case GRO_MERGED_FREE:
Jesse Grossce87fc62016-01-20 17:59:49 -08004781 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
4782 skb_dst_drop(skb);
Steffen Klassertf991bb92017-01-30 06:45:38 +01004783 secpath_reset(skb);
Eric Dumazetd7e88832012-04-30 08:10:34 +00004784 kmem_cache_free(skbuff_head_cache, skb);
Jesse Grossce87fc62016-01-20 17:59:49 -08004785 } else {
Eric Dumazetd7e88832012-04-30 08:10:34 +00004786 __kfree_skb(skb);
Jesse Grossce87fc62016-01-20 17:59:49 -08004787 }
Eric Dumazetdaa86542012-04-19 07:07:40 +00004788 break;
4789
Ben Hutchings5b252f02009-10-29 07:17:09 +00004790 case GRO_HELD:
4791 case GRO_MERGED:
Steffen Klassert25393d32017-02-15 09:39:44 +01004792 case GRO_CONSUMED:
Ben Hutchings5b252f02009-10-29 07:17:09 +00004793 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004794 }
4795
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004796 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004797}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004798
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004799gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004800{
Eric Dumazet93f93a42015-11-18 06:30:59 -08004801 skb_mark_napi_id(skb, napi);
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004802 trace_napi_gro_receive_entry(skb);
Herbert Xu86911732009-01-29 14:19:50 +00004803
Eric Dumazeta50e2332014-03-29 21:28:21 -07004804 skb_gro_reset_offset(skb);
4805
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004806 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004807}
4808EXPORT_SYMBOL(napi_gro_receive);
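/*
 * Illustrative: in a poll loop like the netif_receive_skb() sketch
 * earlier, GRO is enabled simply by swapping the delivery call; the
 * driver still sets skb->protocol via eth_type_trans() first.
 */
#if 0
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);
#endif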
4809
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00004810static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004811{
Eric Dumazet93a35f52014-10-23 06:30:30 -07004812 if (unlikely(skb->pfmemalloc)) {
4813 consume_skb(skb);
4814 return;
4815 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004816 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00004817 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4818 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00004819 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08004820 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08004821 skb->skb_iif = 0;
Jerry Chuc3caf112014-07-14 15:54:46 -07004822 skb->encapsulation = 0;
4823 skb_shinfo(skb)->gso_type = 0;
Eric Dumazete33d0ba2014-04-03 09:28:10 -07004824 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
Steffen Klassertf991bb92017-01-30 06:45:38 +01004825 secpath_reset(skb);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004826
4827 napi->skb = skb;
4828}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004829
Herbert Xu76620aa2009-04-16 02:02:07 -07004830struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08004831{
Herbert Xu5d38a072009-01-04 16:13:40 -08004832 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004833
4834 if (!skb) {
Alexander Duyckfd11a832014-12-09 19:40:49 -08004835 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
Eric Dumazete2f9dc32015-11-19 12:11:23 -08004836 if (skb) {
4837 napi->skb = skb;
4838 skb_mark_napi_id(skb, napi);
4839 }
Herbert Xu5d38a072009-01-04 16:13:40 -08004840 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004841 return skb;
4842}
Herbert Xu76620aa2009-04-16 02:02:07 -07004843EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004844
Eric Dumazeta50e2332014-03-29 21:28:21 -07004845static gro_result_t napi_frags_finish(struct napi_struct *napi,
4846 struct sk_buff *skb,
4847 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004848{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004849 switch (ret) {
4850 case GRO_NORMAL:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004851 case GRO_HELD:
4852 __skb_push(skb, ETH_HLEN);
4853 skb->protocol = eth_type_trans(skb, skb->dev);
4854 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004855 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00004856 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004857
4858 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004859 case GRO_MERGED_FREE:
4860 napi_reuse_skb(napi, skb);
4861 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004862
4863 case GRO_MERGED:
Steffen Klassert25393d32017-02-15 09:39:44 +01004864 case GRO_CONSUMED:
Ben Hutchings5b252f02009-10-29 07:17:09 +00004865 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004866 }
4867
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004868 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004869}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004870
Eric Dumazeta50e2332014-03-29 21:28:21 -07004871/* Upper GRO stack assumes the network header starts at gro_offset=0.
 4872 * Drivers could call both napi_gro_frags() and napi_gro_receive(),
 4873 * so we copy the Ethernet header into skb->data to have a common layout.
4874 */
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004875static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004876{
Herbert Xu76620aa2009-04-16 02:02:07 -07004877 struct sk_buff *skb = napi->skb;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004878 const struct ethhdr *eth;
4879 unsigned int hlen = sizeof(*eth);
Herbert Xu76620aa2009-04-16 02:02:07 -07004880
4881 napi->skb = NULL;
4882
Eric Dumazeta50e2332014-03-29 21:28:21 -07004883 skb_reset_mac_header(skb);
4884 skb_gro_reset_offset(skb);
4885
4886 eth = skb_gro_header_fast(skb, 0);
4887 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4888 eth = skb_gro_header_slow(skb, hlen, 0);
4889 if (unlikely(!eth)) {
Aaron Conole4da46ce2016-04-02 15:26:43 -04004890 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
4891 __func__, napi->dev->name);
Eric Dumazeta50e2332014-03-29 21:28:21 -07004892 napi_reuse_skb(napi, skb);
4893 return NULL;
4894 }
4895 } else {
4896 gro_pull_from_frag0(skb, hlen);
4897 NAPI_GRO_CB(skb)->frag0 += hlen;
4898 NAPI_GRO_CB(skb)->frag0_len -= hlen;
Herbert Xu76620aa2009-04-16 02:02:07 -07004899 }
Eric Dumazeta50e2332014-03-29 21:28:21 -07004900 __skb_pull(skb, hlen);
4901
4902 /*
4903 * This works because the only protocols we care about don't require
4904 * special handling.
4905 * We'll fix it up properly in napi_frags_finish()
4906 */
4907 skb->protocol = eth->h_proto;
Herbert Xu76620aa2009-04-16 02:02:07 -07004908
Herbert Xu76620aa2009-04-16 02:02:07 -07004909 return skb;
4910}
Herbert Xu76620aa2009-04-16 02:02:07 -07004911
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004912gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004913{
4914 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004915
4916 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004917 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004918
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004919 trace_napi_gro_frags_entry(skb);
4920
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004921 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004922}
4923EXPORT_SYMBOL(napi_gro_frags);
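/*
 * Illustrative sketch: page-based drivers skip building a linear skb and
 * instead fill napi->skb's fragments; napi_frags_skb() then pulls the
 * Ethernet header itself. "page", "offset", "len" and "truesize" are
 * hypothetical per-descriptor values.
 */
#if 0
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;	/* allocation failure; leave the frame in the ring */
	skb_add_rx_frag(skb, 0, page, offset, len, truesize);
	napi_gro_frags(napi);
#endif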
4924
Tom Herbert573e8fc2014-08-22 13:33:47 -07004925/* Compute the checksum from gro_offset and return the folded value
4926 * after adding in any pseudo checksum.
4927 */
4928__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4929{
4930 __wsum wsum;
4931 __sum16 sum;
4932
4933 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4934
4935 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4936 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4937 if (likely(!sum)) {
4938 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4939 !skb->csum_complete_sw)
4940 netdev_rx_csum_fault(skb->dev);
4941 }
4942
4943 NAPI_GRO_CB(skb)->csum = wsum;
4944 NAPI_GRO_CB(skb)->csum_valid = 1;
4945
4946 return sum;
4947}
4948EXPORT_SYMBOL(__skb_gro_checksum_complete);
4949
Eric Dumazete326bed2010-04-22 00:22:45 -07004950/*
Zhi Yong Wu855abcf2014-01-01 04:34:50 +08004951 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
Eric Dumazete326bed2010-04-22 00:22:45 -07004952 * Note: called with local irq disabled, but exits with local irq enabled.
4953 */
4954static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4955{
4956#ifdef CONFIG_RPS
4957 struct softnet_data *remsd = sd->rps_ipi_list;
4958
4959 if (remsd) {
4960 sd->rps_ipi_list = NULL;
4961
4962 local_irq_enable();
4963
 4964		/* Send pending IPIs to kick RPS processing on remote CPUs. */
4965 while (remsd) {
4966 struct softnet_data *next = remsd->rps_ipi_next;
4967
4968 if (cpu_online(remsd->cpu))
Frederic Weisbeckerc46fff22014-02-24 16:40:02 +01004969 smp_call_function_single_async(remsd->cpu,
Frederic Weisbeckerfce8ad12014-02-24 16:40:01 +01004970 &remsd->csd);
Eric Dumazete326bed2010-04-22 00:22:45 -07004971 remsd = next;
4972 }
4973 } else
4974#endif
4975 local_irq_enable();
4976}
4977
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004978static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4979{
4980#ifdef CONFIG_RPS
4981 return sd->rps_ipi_list != NULL;
4982#else
4983 return false;
4984#endif
4985}
4986
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004987static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004988{
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004989 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02004990 bool again = true;
4991 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004992
Eric Dumazete326bed2010-04-22 00:22:45 -07004993	/* Check if we have pending IPIs; it's better to send them now,
	4994	 * rather than waiting for net_rx_action() to end.
4995 */
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004996 if (sd_has_rps_ipi_waiting(sd)) {
Eric Dumazete326bed2010-04-22 00:22:45 -07004997 local_irq_disable();
4998 net_rps_action_and_irq_enable(sd);
4999 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08005000
Matthias Tafelmeier3d48b532016-12-29 21:37:21 +01005001 napi->weight = dev_rx_weight;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005002 while (again) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005004
Changli Gao6e7676c2010-04-27 15:07:33 -07005005 while ((skb = __skb_dequeue(&sd->process_queue))) {
Julian Anastasov2c17d272015-07-09 09:59:10 +03005006 rcu_read_lock();
Changli Gao6e7676c2010-04-27 15:07:33 -07005007 __netif_receive_skb(skb);
Julian Anastasov2c17d272015-07-09 09:59:10 +03005008 rcu_read_unlock();
Tom Herbert76cc8b12010-05-20 18:37:59 +00005009 input_queue_head_incr(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005010 if (++work >= quota)
Tom Herbert76cc8b12010-05-20 18:37:59 +00005011 return work;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005012
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005013 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005014
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005015 local_irq_disable();
Changli Gao6e7676c2010-04-27 15:07:33 -07005016 rps_lock(sd);
Tom Herbert11ef7a82014-06-30 09:50:40 -07005017 if (skb_queue_empty(&sd->input_pkt_queue)) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07005018 /*
5019 * Inline a custom version of __napi_complete().
 5020		 * Only the current CPU owns and manipulates this napi,
Tom Herbert11ef7a82014-06-30 09:50:40 -07005021 * and NAPI_STATE_SCHED is the only possible flag set
5022 * on backlog.
5023 * We can use a plain write instead of clear_bit(),
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07005024		 * and we don't need an smp_mb() memory barrier.
5025 */
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07005026 napi->state = 0;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005027 again = false;
5028 } else {
5029 skb_queue_splice_tail_init(&sd->input_pkt_queue,
5030 &sd->process_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07005031 }
5032 rps_unlock(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005033 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07005034 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005035
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005036 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005037}
5038
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005039/**
5040 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005041 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005042 *
Eric Dumazetbc9ad162014-10-28 18:05:13 -07005043 * The entry's receive function will be scheduled to run.
5044 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005045 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08005046void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005047{
5048 unsigned long flags;
5049
5050 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05005051 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005052 local_irq_restore(flags);
5053}
5054EXPORT_SYMBOL(__napi_schedule);
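/*
 * Illustrative sketch: the usual interrupt-side counterpart, pairing
 * napi_schedule_prep() (below) with __napi_schedule(). The
 * device-specific IRQ masking helper is hypothetical.
 */
#if 0
static irqreturn_t mydrv_irq(int irq, void *data)
{
	struct mydrv_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		mydrv_disable_rx_irq(priv);
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
#endif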
5055
Eric Dumazetbc9ad162014-10-28 18:05:13 -07005056/**
Eric Dumazet39e6c822017-02-28 10:34:50 -08005057 * napi_schedule_prep - check if napi can be scheduled
5058 * @n: napi context
5059 *
5060 * Test if NAPI routine is already running, and if not mark
5061 * it as running. This is used as a condition variable
5062 * insure only one NAPI poll instance runs. We also make
5063 * sure there is no pending NAPI disable.
5064 */
5065bool napi_schedule_prep(struct napi_struct *n)
5066{
5067 unsigned long val, new;
5068
5069 do {
5070 val = READ_ONCE(n->state);
5071 if (unlikely(val & NAPIF_STATE_DISABLE))
5072 return false;
5073 new = val | NAPIF_STATE_SCHED;
5074
5075 /* Sets STATE_MISSED bit if STATE_SCHED was already set
5076 * This was suggested by Alexander Duyck, as compiler
5077 * emits better code than :
5078 * if (val & NAPIF_STATE_SCHED)
5079 * new |= NAPIF_STATE_MISSED;
5080 */
5081 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5082 NAPIF_STATE_MISSED;
5083 } while (cmpxchg(&n->state, val, new) != val);
5084
5085 return !(val & NAPIF_STATE_SCHED);
5086}
5087EXPORT_SYMBOL(napi_schedule_prep);
5088
5089/**
Eric Dumazetbc9ad162014-10-28 18:05:13 -07005090 * __napi_schedule_irqoff - schedule for receive
5091 * @n: entry to schedule
5092 *
5093 * Variant of __napi_schedule() assuming hard irqs are masked
5094 */
5095void __napi_schedule_irqoff(struct napi_struct *n)
5096{
5097 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5098}
5099EXPORT_SYMBOL(__napi_schedule_irqoff);
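
/* Example (an illustrative sketch, not taken from any driver): a typical
 * hard irq handler pairs napi_schedule_prep() with one of the
 * __napi_schedule*() variants above. my_irq_handler(), my_mask_irqs()
 * and struct my_priv are hypothetical driver-side names.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			my_mask_irqs(priv);
 *			__napi_schedule_irqoff(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */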

bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new;

	/*
	 * 1) Don't let NAPI dequeue from the CPU poll list
	 *    just in case it's running on a different CPU.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))
		return false;

	if (n->gro_list) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
		else
			napi_gro_flush(n, false);
	}
	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);
	}

	do {
		val = READ_ONCE(n->state);

		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (cmpxchg(&n->state, val, new) != val);

	if (unlikely(val & NAPIF_STATE_MISSED)) {
		__napi_schedule(n);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(napi_complete_done);
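
/* Example (an illustrative sketch): the canonical driver poll() shape
 * this function supports. my_poll(), my_clean_rx(), my_unmask_irqs()
 * and struct my_priv are hypothetical; the contract is the real one:
 * re-enable device interrupts only when work done stayed under budget
 * and napi_complete_done() returned true.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv,
 *						    napi);
 *		int work = my_clean_rx(priv, budget);
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			my_unmask_irqs(priv);
 *		return work;
 *	}
 */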

/* must be called under rcu_read_lock(), as we don't take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}

#if defined(CONFIG_NET_RX_BUSY_POLL)

#define BUSY_POLL_BUDGET 8

static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
{
	int rc;

	/* Busy polling means there is a high chance the device driver's
	 * hard irq could not grab NAPI_STATE_SCHED, and that
	 * NAPI_STATE_MISSED was set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit() calls.
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	local_bh_disable();

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, BUSY_POLL_BUDGET);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == BUSY_POLL_BUDGET)
		__napi_schedule(napi);
	local_bh_enable();
	if (local_softirq_pending())
		do_softirq();
}

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg)
{
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

restart:
	napi_poll = NULL;

	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (!napi)
		goto out;

	preempt_disable();
	for (;;) {
		int work = 0;

		local_bh_disable();
		if (!napi_poll) {
			unsigned long val = READ_ONCE(napi->state);

			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
				   NAPIF_STATE_IN_BUSY_POLL))
				goto count;
			if (cmpxchg(&napi->state, val,
				    val | NAPIF_STATE_IN_BUSY_POLL |
					  NAPIF_STATE_SCHED) != val)
				goto count;
			have_poll_lock = netpoll_poll_lock(napi);
			napi_poll = napi->poll;
		}
		work = napi_poll(napi, BUSY_POLL_BUDGET);
		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
count:
		if (work > 0)
			__NET_ADD_STATS(dev_net(napi->dev),
					LINUX_MIB_BUSYPOLLRXPACKETS, work);
		local_bh_enable();

		if (!loop_end || loop_end(loop_end_arg, start_time))
			break;

		if (unlikely(need_resched())) {
			if (napi_poll)
				busy_poll_stop(napi, have_poll_lock);
			preempt_enable();
			rcu_read_unlock();
			cond_resched();
			if (loop_end(loop_end_arg, start_time))
				return;
			goto restart;
		}
		cpu_relax();
	}
	if (napi_poll)
		busy_poll_stop(napi, have_poll_lock);
	preempt_enable();
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);
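
/* Example (a minimal sketch, not the real sk_busy_loop() helper):
 * callers pass a loop_end callback that returns true when polling
 * should stop. busy_loop_current_time() ticks in roughly-microsecond
 * units, so a ~50 usec budget could be expressed as below;
 * my_loop_end() and the budget value are hypothetical.
 *
 *	static bool my_loop_end(void *arg, unsigned long start_time)
 *	{
 *		return time_after(busy_loop_current_time(),
 *				  start_time + 50);
 *	}
 *
 *	napi_busy_loop(napi_id, my_loop_end, NULL);
 */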

#endif /* CONFIG_NET_RX_BUSY_POLL */

static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}

/* Warning: the caller is responsible for making sure an RCU grace period
 * has elapsed before freeing the memory containing @napi.
 */
bool napi_hash_del(struct napi_struct *napi)
{
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	}
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
}
EXPORT_SYMBOL_GPL(napi_hash_del);

static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note: we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (napi->gro_list && !napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
		__napi_schedule_irqoff(napi);

	return HRTIMER_NORESTART;
}

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
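
/* Example (an illustrative sketch): registration in a driver's
 * probe/open path; my_poll() and priv are hypothetical, and
 * NAPI_POLL_WEIGHT is the recommended weight for new drivers.
 *
 *	netif_napi_add(netdev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 *	napi_enable(&priv->napi);
 */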

void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	might_sleep();
	if (napi_hash_del(napi))
		synchronize_net();
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);
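
/* Example (an illustrative sketch): the matching teardown order in a
 * driver's stop/remove path, in process context as required above;
 * priv is hypothetical.
 *
 *	napi_disable(&priv->napi);
 *	netif_napi_del(&priv->napi);
 */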

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi(). Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call. Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight. In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* Flush packets that are too old.
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}

static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(netdev_budget_usecs);
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				goto out;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for up to 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
out:
	__kfree_skb_flush();
}

struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
{
	struct net_device *dev = data;

	return upper_dev == dev;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks only the immediate upper device,
 * not the complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					     upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
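
/* Example (an illustrative sketch): answering "is port_dev directly
 * linked below br_dev?" under RTNL; port_dev and br_dev are
 * hypothetical.
 *
 *	rtnl_lock();
 *	linked = netdev_has_upper_dev(port_dev, br_dev);
 *	rtnl_unlock();
 */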

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks the entire upper device chain.
 * The caller must hold the RCU read lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
					       upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);
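
/* Example (an illustrative sketch): detecting enslavement under RTNL.
 *
 *	struct net_device *master = netdev_master_upper_dev_get(dev);
 *
 *	if (master)
 *		netdev_info(dev, "enslaved to %s\n", master->name);
 */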

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold the RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);

static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *udev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.upper,
	     udev = netdev_next_upper_dev_rcu(dev, &iter);
	     udev;
	     udev = netdev_next_upper_dev_rcu(dev, &iter)) {
		/* first is the upper device itself */
		ret = fn(udev, data);
		if (ret)
			return ret;

		/* then look at all of its upper devices */
		ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
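
/* Example (an illustrative sketch): the walker invokes fn() once per
 * upper device, recursing depth-first, and stops at the first nonzero
 * return. A hypothetical callback that finds a bridge anywhere above
 * dev:
 *
 *	static int my_find_bridge(struct net_device *upper, void *data)
 *	{
 *		struct net_device **br = data;
 *
 *		if (!netif_is_bridge_master(upper))
 *			return 0;
 *		*br = upper;
 *		return 1;
 *	}
 *
 *	rcu_read_lock();
 *	netdev_walk_all_upper_dev_rcu(dev, my_find_bridge, &br);
 *	rcu_read_unlock();
 */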

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or use its own locking that guarantees that the neighbour
 * lower list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold the RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or use its own locking that guarantees that the neighbour
 * lower list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);

static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					void *data),
			      void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    void *data),
				  void *data)
{
	struct net_device *ldev;
	struct list_head *iter;
	int ret;

	for (iter = &dev->adj_list.lower,
	     ldev = netdev_next_lower_dev_rcu(dev, &iter);
	     ldev;
	     ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
		/* first is the lower device itself */
		ret = fn(ldev, data);
		if (ret)
			return ret;

		/* then look at all of its lower devices */
		ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold the RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr += 1;
		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);

		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that the master link is always the first item in the list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info)
{
	struct netdev_notifier_changeupper_info changeupper_info;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check that dev is not an upper device of upper_dev. */
	if (netdev_has_upper_dev(upper_dev, dev))
		return -EBUSY;

	if (netdev_has_upper_dev(dev, upper_dev))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = master;
	changeupper_info.linking = true;
	changeupper_info.upper_info = upper_info;

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback;

	return 0;

rollback:
	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to a device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 *
 * Adds a link to a device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info)
{
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to a device which is upper to this one. The caller must
 * hold the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info;

	ASSERT_RTNL();

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
	changeupper_info.linking = false;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
				      &changeupper_info.info);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
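
/* Example (an illustrative sketch): a bonding-style master links a port
 * below itself under RTNL and undoes the link on release; bond_dev,
 * slave_dev and the error label are hypothetical.
 *
 *	ASSERT_RTNL();
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev, NULL, NULL);
 *	if (err)
 *		goto err_unwind;
 *	...
 *	netdev_upper_dev_unlink(slave_dev, bond_dev);
 */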

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info;

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);

static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;
	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;
	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;
	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

int dev_get_nest_level(struct net_device *dev)
{
	struct net_device *lower = NULL;
	struct list_head *iter;
	int max_nest = -1;
	int nest;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower);
		if (max_nest < nest)
			max_nest = nest;
	}

	return max_nest + 1;
}
EXPORT_SYMBOL(dev_get_nest_level);

/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{
	struct netdev_notifier_changelowerstate_info changelowerstate_info;

	ASSERT_RTNL();
	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
				      &changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);
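
/* Example (an illustrative sketch): a team/bond-style master publishing
 * a port's state change to listeners such as switchdev drivers; struct
 * my_port_state and its fields are hypothetical stand-ins for the
 * driver's own info struct.
 *
 *	struct my_port_state state = {
 *		.link_up    = port_up,
 *		.tx_enabled = port_tx_enabled,
 *	};
 *
 *	netdev_lower_state_changed(port_dev, &state);
 */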
6361
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006362static void dev_change_rx_flags(struct net_device *dev, int flags)
6363{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006364 const struct net_device_ops *ops = dev->netdev_ops;
6365
Vlad Yasevichd2615bf2013-11-19 20:47:15 -05006366 if (ops->ndo_change_rx_flags)
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006367 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006368}
6369
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006370static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
Patrick McHardy4417da62007-06-27 01:28:10 -07006371{
Eric Dumazetb536db92011-11-30 21:42:26 +00006372 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06006373 kuid_t uid;
6374 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07006375
Patrick McHardy24023452007-07-14 18:51:31 -07006376 ASSERT_RTNL();
6377
Wang Chendad9b332008-06-18 01:48:28 -07006378 dev->flags |= IFF_PROMISC;
6379 dev->promiscuity += inc;
6380 if (dev->promiscuity == 0) {
6381 /*
6382 * Avoid overflow.
6383	 * If inc would overflow the counter, leave promiscuity untouched and return an error.
6384 */
6385 if (inc < 0)
6386 dev->flags &= ~IFF_PROMISC;
6387 else {
6388 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006389			pr_warn("%s: promiscuity counter overflowed, promiscuity left unchanged; promiscuous mode may be unreliable.\n",
6390 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07006391 return -EOVERFLOW;
6392 }
6393 }
Patrick McHardy4417da62007-06-27 01:28:10 -07006394 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006395 pr_info("device %s %s promiscuous mode\n",
6396 dev->name,
6397 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11006398 if (audit_enabled) {
6399 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05006400 audit_log(current->audit_context, GFP_ATOMIC,
6401 AUDIT_ANOM_PROMISCUOUS,
6402 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6403 dev->name, (dev->flags & IFF_PROMISC),
6404 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07006405 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06006406 from_kuid(&init_user_ns, uid),
6407 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05006408 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11006409 }
Patrick McHardy24023452007-07-14 18:51:31 -07006410
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006411 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07006412 }
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006413 if (notify)
6414 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
Wang Chendad9b332008-06-18 01:48:28 -07006415 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07006416}
6417
Linus Torvalds1da177e2005-04-16 15:20:36 -07006418/**
6419 * dev_set_promiscuity - update promiscuity count on a device
6420 * @dev: device
6421 * @inc: modifier
6422 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07006423 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006424 * remains above zero the interface remains promiscuous. Once it hits zero
6425 * the device reverts to normal filtering operation. A negative inc
6426 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07006427 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006428 */
Wang Chendad9b332008-06-18 01:48:28 -07006429int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006430{
Eric Dumazetb536db92011-11-30 21:42:26 +00006431 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07006432 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006433
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006434 err = __dev_set_promiscuity(dev, inc, true);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07006435 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07006436 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07006437 if (dev->flags != old_flags)
6438 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07006439 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006440}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006441EXPORT_SYMBOL(dev_set_promiscuity);
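
/*
 * Illustrative sketch, not part of this file: a capture-style user
 * takes a promiscuity reference and later drops it under RTNL; the
 * device stays promiscuous while the count is non-zero. 'dev' is
 * assumed to be a valid, held net_device.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	...
 *	if (!err)
 *		err = dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */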
Linus Torvalds1da177e2005-04-16 15:20:36 -07006442
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006443static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006444{
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006445 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006446
Patrick McHardy24023452007-07-14 18:51:31 -07006447 ASSERT_RTNL();
6448
Linus Torvalds1da177e2005-04-16 15:20:36 -07006449 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07006450 dev->allmulti += inc;
6451 if (dev->allmulti == 0) {
6452 /*
6453 * Avoid overflow.
6454	 * If inc would overflow the counter, leave allmulti untouched and return an error.
6455 */
6456 if (inc < 0)
6457 dev->flags &= ~IFF_ALLMULTI;
6458 else {
6459 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006460			pr_warn("%s: allmulti counter overflowed, allmulti left unchanged; allmulti mode may be unreliable.\n",
6461 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07006462 return -EOVERFLOW;
6463 }
6464 }
Patrick McHardy24023452007-07-14 18:51:31 -07006465 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006466 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07006467 dev_set_rx_mode(dev);
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006468 if (notify)
6469 __dev_notify_flags(dev, old_flags,
6470 dev->gflags ^ old_gflags);
Patrick McHardy24023452007-07-14 18:51:31 -07006471 }
Wang Chendad9b332008-06-18 01:48:28 -07006472 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07006473}
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006474
6475/**
6476 * dev_set_allmulti - update allmulti count on a device
6477 * @dev: device
6478 * @inc: modifier
6479 *
6480 * Add or remove reception of all multicast frames to a device. While the
6481 * count in the device remains above zero the interface remains listening
6482 * to all multicast frames. Once it hits zero the device reverts to normal
6483 * filtering operation. A negative @inc value is used to drop the counter
6484 * when releasing a resource needing all multicasts.
6485 * Return 0 if successful or a negative errno code on error.
6486 */
6487
6488int dev_set_allmulti(struct net_device *dev, int inc)
6489{
6490 return __dev_set_allmulti(dev, inc, true);
6491}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006492EXPORT_SYMBOL(dev_set_allmulti);
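
/*
 * Illustrative sketch, not part of this file: a multicast routing
 * component takes and releases an allmulti reference the same way,
 * under RTNL, with 'dev' assumed valid and held.
 *
 *	rtnl_lock();
 *	err = dev_set_allmulti(dev, 1);
 *	...
 *	if (!err)
 *		err = dev_set_allmulti(dev, -1);
 *	rtnl_unlock();
 */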
Patrick McHardy4417da62007-06-27 01:28:10 -07006493
6494/*
6495 * Upload unicast and multicast address lists to device and
6496 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08006497 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07006498 * are present.
6499 */
6500void __dev_set_rx_mode(struct net_device *dev)
6501{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006502 const struct net_device_ops *ops = dev->netdev_ops;
6503
Patrick McHardy4417da62007-06-27 01:28:10 -07006504 /* dev_open will call this function so the list will stay sane. */
6505 if (!(dev->flags&IFF_UP))
6506 return;
6507
6508 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09006509 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07006510
Jiri Pirko01789342011-08-16 06:29:00 +00006511 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07006512		/* Unicast address changes may only happen under the rtnl,
6513 * therefore calling __dev_set_promiscuity here is safe.
6514 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08006515 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006516 __dev_set_promiscuity(dev, 1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07006517 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08006518 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006519 __dev_set_promiscuity(dev, -1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07006520 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07006521 }
Patrick McHardy4417da62007-06-27 01:28:10 -07006522 }
Jiri Pirko01789342011-08-16 06:29:00 +00006523
6524 if (ops->ndo_set_rx_mode)
6525 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07006526}
6527
6528void dev_set_rx_mode(struct net_device *dev)
6529{
David S. Millerb9e40852008-07-15 00:15:08 -07006530 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07006531 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07006532 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006533}
6534
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006535/**
6536 * dev_get_flags - get flags reported to userspace
6537 * @dev: device
6538 *
6539 * Get the combination of flag bits exported through APIs to userspace.
6540 */
Eric Dumazet95c96172012-04-15 05:58:06 +00006541unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006542{
Eric Dumazet95c96172012-04-15 05:58:06 +00006543 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006544
6545 flags = (dev->flags & ~(IFF_PROMISC |
6546 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08006547 IFF_RUNNING |
6548 IFF_LOWER_UP |
6549 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07006550 (dev->gflags & (IFF_PROMISC |
6551 IFF_ALLMULTI));
6552
Stefan Rompfb00055a2006-03-20 17:09:11 -08006553 if (netif_running(dev)) {
6554 if (netif_oper_up(dev))
6555 flags |= IFF_RUNNING;
6556 if (netif_carrier_ok(dev))
6557 flags |= IFF_LOWER_UP;
6558 if (netif_dormant(dev))
6559 flags |= IFF_DORMANT;
6560 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006561
6562 return flags;
6563}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006564EXPORT_SYMBOL(dev_get_flags);
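
/*
 * Illustrative sketch, not part of this file: the returned value mixes
 * dev->flags with the volatile IFF_RUNNING/IFF_LOWER_UP/IFF_DORMANT
 * bits derived from carrier and operstate, e.g.:
 *
 *	unsigned int flags = dev_get_flags(dev);
 *
 *	if ((flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
 *		the device is administratively and operationally up
 */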
Linus Torvalds1da177e2005-04-16 15:20:36 -07006565
Patrick McHardybd380812010-02-26 06:34:53 +00006566int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006567{
Eric Dumazetb536db92011-11-30 21:42:26 +00006568 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00006569 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006570
Patrick McHardy24023452007-07-14 18:51:31 -07006571 ASSERT_RTNL();
6572
Linus Torvalds1da177e2005-04-16 15:20:36 -07006573 /*
6574 * Set the flags on our device.
6575 */
6576
6577 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6578 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6579 IFF_AUTOMEDIA)) |
6580 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6581 IFF_ALLMULTI));
6582
6583 /*
6584 * Load in the correct multicast list now the flags have changed.
6585 */
6586
Patrick McHardyb6c40d62008-10-07 15:26:48 -07006587 if ((old_flags ^ flags) & IFF_MULTICAST)
6588 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07006589
Patrick McHardy4417da62007-06-27 01:28:10 -07006590 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006591
6592 /*
6593	 * Have we downed the interface? We handle IFF_UP ourselves
6594 * according to user attempts to set it, rather than blindly
6595 * setting it.
6596 */
6597
6598 ret = 0;
Peter Pan(潘卫平)d215d102014-06-16 21:57:22 +08006599 if ((old_flags ^ flags) & IFF_UP)
Patrick McHardybd380812010-02-26 06:34:53 +00006600 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006601
Linus Torvalds1da177e2005-04-16 15:20:36 -07006602 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006603 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006604 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006605
Linus Torvalds1da177e2005-04-16 15:20:36 -07006606 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006607
6608 if (__dev_set_promiscuity(dev, inc, false) >= 0)
6609 if (dev->flags != old_flags)
6610 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006611 }
6612
6613 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
tchardingeb13da12017-02-09 17:56:06 +11006614	 * is important. Some (broken) drivers set IFF_PROMISC when
6615	 * IFF_ALLMULTI is requested, without asking us and without reporting it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006616 */
6617 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006618 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6619
Linus Torvalds1da177e2005-04-16 15:20:36 -07006620 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006621 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006622 }
6623
Patrick McHardybd380812010-02-26 06:34:53 +00006624 return ret;
6625}
6626
Nicolas Dichtela528c212013-09-25 12:02:44 +02006627void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6628 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00006629{
6630 unsigned int changes = dev->flags ^ old_flags;
6631
Nicolas Dichtela528c212013-09-25 12:02:44 +02006632 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006633 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02006634
Patrick McHardybd380812010-02-26 06:34:53 +00006635 if (changes & IFF_UP) {
6636 if (dev->flags & IFF_UP)
6637 call_netdevice_notifiers(NETDEV_UP, dev);
6638 else
6639 call_netdevice_notifiers(NETDEV_DOWN, dev);
6640 }
6641
6642 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00006643 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
6644 struct netdev_notifier_change_info change_info;
6645
6646 change_info.flags_changed = changes;
6647 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
6648 &change_info.info);
6649 }
Patrick McHardybd380812010-02-26 06:34:53 +00006650}
6651
6652/**
6653 * dev_change_flags - change device settings
6654 * @dev: device
6655 * @flags: device state flags
6656 *
6657 * Change settings on a device based on the given state flags. The flags are
6658 * in the userspace exported format.
6659 */
Eric Dumazetb536db92011-11-30 21:42:26 +00006660int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00006661{
Eric Dumazetb536db92011-11-30 21:42:26 +00006662 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006663 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00006664
6665 ret = __dev_change_flags(dev, flags);
6666 if (ret < 0)
6667 return ret;
6668
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02006669 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02006670 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006671 return ret;
6672}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006673EXPORT_SYMBOL(dev_change_flags);
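
/*
 * Illustrative sketch, not part of this file: bringing an interface up
 * roughly the way the SIOCSIFFLAGS ioctl does, preserving all other
 * flags. RTNL must be held.
 *
 *	unsigned int flags = dev_get_flags(dev);
 *
 *	err = dev_change_flags(dev, flags | IFF_UP);
 */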
Linus Torvalds1da177e2005-04-16 15:20:36 -07006674
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006675static int __dev_set_mtu(struct net_device *dev, int new_mtu)
6676{
6677 const struct net_device_ops *ops = dev->netdev_ops;
6678
6679 if (ops->ndo_change_mtu)
6680 return ops->ndo_change_mtu(dev, new_mtu);
6681
6682 dev->mtu = new_mtu;
6683 return 0;
6684}
6685
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006686/**
6687 * dev_set_mtu - Change maximum transfer unit
6688 * @dev: device
6689 * @new_mtu: new transfer unit
6690 *
6691 * Change the maximum transfer size of the network device.
6692 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006693int dev_set_mtu(struct net_device *dev, int new_mtu)
6694{
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006695 int err, orig_mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006696
6697 if (new_mtu == dev->mtu)
6698 return 0;
6699
Jarod Wilson61e84622016-10-07 22:04:33 -04006700 /* MTU must be positive, and in range */
6701 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
6702 net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
6703 dev->name, new_mtu, dev->min_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006704 return -EINVAL;
Jarod Wilson61e84622016-10-07 22:04:33 -04006705 }
6706
6707 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
6708 net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
Jakub Kicinskia0e65de2016-10-17 18:02:22 +01006709 dev->name, new_mtu, dev->max_mtu);
Jarod Wilson61e84622016-10-07 22:04:33 -04006710 return -EINVAL;
6711 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006712
6713 if (!netif_device_present(dev))
6714 return -ENODEV;
6715
Veaceslav Falico1d486bf2014-01-16 00:02:18 +01006716 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6717 err = notifier_to_errno(err);
6718 if (err)
6719 return err;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006720
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006721 orig_mtu = dev->mtu;
6722 err = __dev_set_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006723
Veaceslav Falico2315dc92014-01-10 16:56:25 +01006724 if (!err) {
6725 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6726 err = notifier_to_errno(err);
6727 if (err) {
6728			/* Set the MTU back and notify everyone again,
6729			 * so that they have a chance to revert the change.
6730 */
6731 __dev_set_mtu(dev, orig_mtu);
6732 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6733 }
6734 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006735 return err;
6736}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006737EXPORT_SYMBOL(dev_set_mtu);
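
/*
 * Illustrative sketch, not part of this file: the in-kernel equivalent
 * of "ip link set dev ... mtu 9000" once the device has been looked
 * up. Values outside dev->min_mtu/dev->max_mtu fail with -EINVAL
 * before the driver is consulted.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 */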
Linus Torvalds1da177e2005-04-16 15:20:36 -07006738
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006739/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006740 * dev_set_group - Change group this device belongs to
6741 * @dev: device
6742 * @new_group: group this device should belong to
6743 */
6744void dev_set_group(struct net_device *dev, int new_group)
6745{
6746 dev->group = new_group;
6747}
6748EXPORT_SYMBOL(dev_set_group);
6749
6750/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006751 * dev_set_mac_address - Change Media Access Control Address
6752 * @dev: device
6753 * @sa: new address
6754 *
6755 * Change the hardware (MAC) address of the device
6756 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006757int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6758{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006759 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006760 int err;
6761
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006762 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006763 return -EOPNOTSUPP;
6764 if (sa->sa_family != dev->type)
6765 return -EINVAL;
6766 if (!netif_device_present(dev))
6767 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006768 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00006769 if (err)
6770 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00006771 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00006772 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006773 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00006774 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006775}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006776EXPORT_SYMBOL(dev_set_mac_address);
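
/*
 * Illustrative sketch, not part of this file: setting a new hardware
 * address from a caller-built sockaddr. sa_family must match dev->type
 * and RTNL must be held; the address value is hypothetical.
 *
 *	struct sockaddr sa;
 *	static const u8 new_addr[ETH_ALEN] =
 *		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, ETH_ALEN);
 *	err = dev_set_mac_address(dev, &sa);
 */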
Linus Torvalds1da177e2005-04-16 15:20:36 -07006777
Jiri Pirko4bf84c32012-12-27 23:49:37 +00006778/**
6779 * dev_change_carrier - Change device carrier
6780 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00006781 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00006782 *
6783 * Change device carrier
6784 */
6785int dev_change_carrier(struct net_device *dev, bool new_carrier)
6786{
6787 const struct net_device_ops *ops = dev->netdev_ops;
6788
6789 if (!ops->ndo_change_carrier)
6790 return -EOPNOTSUPP;
6791 if (!netif_device_present(dev))
6792 return -ENODEV;
6793 return ops->ndo_change_carrier(dev, new_carrier);
6794}
6795EXPORT_SYMBOL(dev_change_carrier);
6796
Linus Torvalds1da177e2005-04-16 15:20:36 -07006797/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02006798 * dev_get_phys_port_id - Get device physical port ID
6799 * @dev: device
6800 * @ppid: port ID
6801 *
6802 * Get device physical port ID
6803 */
6804int dev_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01006805 struct netdev_phys_item_id *ppid)
Jiri Pirko66b52b02013-07-29 18:16:49 +02006806{
6807 const struct net_device_ops *ops = dev->netdev_ops;
6808
6809 if (!ops->ndo_get_phys_port_id)
6810 return -EOPNOTSUPP;
6811 return ops->ndo_get_phys_port_id(dev, ppid);
6812}
6813EXPORT_SYMBOL(dev_get_phys_port_id);
6814
6815/**
David Aherndb24a902015-03-17 20:23:15 -06006816 * dev_get_phys_port_name - Get device physical port name
6817 * @dev: device
6818 * @name: port name
Luis de Bethencourted49e652016-03-21 16:31:14 +00006819 * @len: limit of bytes to copy to name
David Aherndb24a902015-03-17 20:23:15 -06006820 *
6821 * Get device physical port name
6822 */
6823int dev_get_phys_port_name(struct net_device *dev,
6824 char *name, size_t len)
6825{
6826 const struct net_device_ops *ops = dev->netdev_ops;
6827
6828 if (!ops->ndo_get_phys_port_name)
6829 return -EOPNOTSUPP;
6830 return ops->ndo_get_phys_port_name(dev, name, len);
6831}
6832EXPORT_SYMBOL(dev_get_phys_port_name);
6833
6834/**
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07006835 * dev_change_proto_down - update protocol port state information
6836 * @dev: device
6837 * @proto_down: new value
6838 *
6839 * This info can be used by switch drivers to set the phys state of the
6840 * port.
6841 */
6842int dev_change_proto_down(struct net_device *dev, bool proto_down)
6843{
6844 const struct net_device_ops *ops = dev->netdev_ops;
6845
6846 if (!ops->ndo_change_proto_down)
6847 return -EOPNOTSUPP;
6848 if (!netif_device_present(dev))
6849 return -ENODEV;
6850 return ops->ndo_change_proto_down(dev, proto_down);
6851}
6852EXPORT_SYMBOL(dev_change_proto_down);
6853
6854/**
Brenden Blancoa7862b42016-07-19 12:16:48 -07006855 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
6856 * @dev: device
Jakub Kicinskib5d60982017-05-01 15:53:43 -07006857 * @extack: netlink extended ack
Brenden Blancoa7862b42016-07-19 12:16:48 -07006858 * @fd: new program fd or negative value to clear
Daniel Borkmann85de8572016-11-28 23:16:54 +01006859 * @flags: xdp-related flags
Brenden Blancoa7862b42016-07-19 12:16:48 -07006860 *
6861 * Set or clear a bpf program for a device
6862 */
Jakub Kicinskiddf9f972017-04-30 21:46:46 -07006863int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
6864 int fd, u32 flags)
Brenden Blancoa7862b42016-07-19 12:16:48 -07006865{
David S. Millerb5cdae32017-04-18 15:36:58 -04006866 int (*xdp_op)(struct net_device *dev, struct netdev_xdp *xdp);
Brenden Blancoa7862b42016-07-19 12:16:48 -07006867 const struct net_device_ops *ops = dev->netdev_ops;
6868 struct bpf_prog *prog = NULL;
Daniel Borkmann85de8572016-11-28 23:16:54 +01006869 struct netdev_xdp xdp;
Brenden Blancoa7862b42016-07-19 12:16:48 -07006870 int err;
6871
Daniel Borkmann85de8572016-11-28 23:16:54 +01006872 ASSERT_RTNL();
6873
David S. Millerb5cdae32017-04-18 15:36:58 -04006874 xdp_op = ops->ndo_xdp;
6875 if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
6876 xdp_op = generic_xdp_install;
6877
Brenden Blancoa7862b42016-07-19 12:16:48 -07006878 if (fd >= 0) {
Daniel Borkmann85de8572016-11-28 23:16:54 +01006879 if (flags & XDP_FLAGS_UPDATE_IF_NOEXIST) {
6880 memset(&xdp, 0, sizeof(xdp));
6881 xdp.command = XDP_QUERY_PROG;
6882
David S. Millerb5cdae32017-04-18 15:36:58 -04006883 err = xdp_op(dev, &xdp);
Daniel Borkmann85de8572016-11-28 23:16:54 +01006884 if (err < 0)
6885 return err;
6886 if (xdp.prog_attached)
6887 return -EBUSY;
6888 }
6889
Brenden Blancoa7862b42016-07-19 12:16:48 -07006890 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
6891 if (IS_ERR(prog))
6892 return PTR_ERR(prog);
6893 }
6894
Daniel Borkmann85de8572016-11-28 23:16:54 +01006895 memset(&xdp, 0, sizeof(xdp));
Brenden Blancoa7862b42016-07-19 12:16:48 -07006896 xdp.command = XDP_SETUP_PROG;
Jakub Kicinskiddf9f972017-04-30 21:46:46 -07006897 xdp.extack = extack;
Brenden Blancoa7862b42016-07-19 12:16:48 -07006898 xdp.prog = prog;
Daniel Borkmann85de8572016-11-28 23:16:54 +01006899
David S. Millerb5cdae32017-04-18 15:36:58 -04006900 err = xdp_op(dev, &xdp);
Brenden Blancoa7862b42016-07-19 12:16:48 -07006901 if (err < 0 && prog)
6902 bpf_prog_put(prog);
6903
6904 return err;
6905}
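
/*
 * Illustrative sketch, not part of this file: rtnetlink attaches an
 * XDP program from IFLA_XDP attributes roughly like this. A negative
 * fd clears the program, and XDP_FLAGS_SKB_MODE forces the generic
 * skb-based hook even on drivers with native XDP support.
 *
 *	ASSERT_RTNL();
 *	err = dev_change_xdp_fd(dev, extack, prog_fd,
 *				XDP_FLAGS_UPDATE_IF_NOEXIST);
 */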
Brenden Blancoa7862b42016-07-19 12:16:48 -07006906
6907/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006908 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07006909 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07006910 *
6911 * Returns a suitable unique value for a new device interface
6912 * number. The caller must hold the rtnl semaphore or the
6913 * dev_base_lock to be sure it remains unique.
6914 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07006915static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006916{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00006917 int ifindex = net->ifindex;
tchardingf4563a72017-02-09 17:56:07 +11006918
Linus Torvalds1da177e2005-04-16 15:20:36 -07006919 for (;;) {
6920 if (++ifindex <= 0)
6921 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006922 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00006923 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006924 }
6925}
6926
Linus Torvalds1da177e2005-04-16 15:20:36 -07006927/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08006928static LIST_HEAD(net_todo_list);
Cong Wang200b9162014-05-12 15:11:20 -07006929DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006930
Stephen Hemminger6f05f622007-03-08 20:46:03 -08006931static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006932{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006933 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07006934 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006935}
6936
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006937static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006938{
Krishna Kumare93737b2009-12-08 22:26:02 +00006939 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006940 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006941
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006942 BUG_ON(dev_boot_phase);
6943 ASSERT_RTNL();
6944
Krishna Kumare93737b2009-12-08 22:26:02 +00006945 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006946		/* Some devices call unregister without ever having
Krishna Kumare93737b2009-12-08 22:26:02 +00006947		 * registered, to unwind a failed initialization. Remove
6948		 * those devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006949 */
6950 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006951 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6952 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006953
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006954 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00006955 list_del(&dev->unreg_list);
6956 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006957 }
Eric Dumazet449f4542011-05-19 12:24:16 +00006958 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006959 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00006960 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006961
Octavian Purdila44345722010-12-13 12:44:07 +00006962 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006963 list_for_each_entry(dev, head, unreg_list)
6964 list_add_tail(&dev->close_list, &close_head);
David S. Miller99c4a262015-03-18 22:52:33 -04006965 dev_close_many(&close_head, true);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006966
Octavian Purdila44345722010-12-13 12:44:07 +00006967 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006968 /* And unlink it from device chain. */
6969 unlist_netdevice(dev);
6970
6971 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006972 }
Eric Dumazet41852492016-08-26 12:50:39 -07006973 flush_all_backlogs();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006974
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006975 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006976
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006977 list_for_each_entry(dev, head, unreg_list) {
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006978 struct sk_buff *skb = NULL;
6979
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006980 /* Shutdown queueing discipline. */
6981 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006982
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006983
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006984		/* Notify protocols that we are about to destroy
tchardingeb13da12017-02-09 17:56:06 +11006985		 * this device. They should clean up all their state.
6986 */
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006987 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6988
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006989 if (!dev->rtnl_link_ops ||
6990 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6991 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6992 GFP_KERNEL);
6993
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006994 /*
6995 * Flush the unicast and multicast chains
6996 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006997 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006998 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006999
7000 if (dev->netdev_ops->ndo_uninit)
7001 dev->netdev_ops->ndo_uninit(dev);
7002
Mahesh Bandewar395eea62014-12-03 13:46:24 -08007003 if (skb)
7004 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
Roopa Prabhu56bfa7e2014-05-01 11:40:30 -07007005
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007006 /* Notifier chain MUST detach us all upper devices. */
7007 WARN_ON(netdev_has_any_upper_dev(dev));
David Ahern0f524a82016-10-17 19:15:52 -07007008 WARN_ON(netdev_has_any_lower_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007009
7010 /* Remove entries from kobject tree */
7011 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00007012#ifdef CONFIG_XPS
7013 /* Remove XPS queueing entries */
7014 netif_reset_xps_queues_gt(dev, 0);
7015#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007016 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007017
Eric W. Biederman850a5452011-10-13 22:25:23 +00007018 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007019
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00007020 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007021 dev_put(dev);
7022}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007023
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007024static void rollback_registered(struct net_device *dev)
7025{
7026 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007027
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007028 list_add(&dev->unreg_list, &single);
7029 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00007030 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007031}
7032
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007033static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
7034 struct net_device *upper, netdev_features_t features)
7035{
7036 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7037 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007038 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007039
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007040 for_each_netdev_feature(&upper_disables, feature_bit) {
7041 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007042 if (!(upper->wanted_features & feature)
7043 && (features & feature)) {
7044 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
7045 &feature, upper->name);
7046 features &= ~feature;
7047 }
7048 }
7049
7050 return features;
7051}
7052
7053static void netdev_sync_lower_features(struct net_device *upper,
7054 struct net_device *lower, netdev_features_t features)
7055{
7056 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7057 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007058 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007059
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05007060 for_each_netdev_feature(&upper_disables, feature_bit) {
7061 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007062 if (!(features & feature) && (lower->features & feature)) {
7063 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
7064 &feature, lower->name);
7065 lower->wanted_features &= ~feature;
7066 netdev_update_features(lower);
7067
7068 if (unlikely(lower->features & feature))
7069 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
7070 &feature, lower->name);
7071 }
7072 }
7073}
7074
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007075static netdev_features_t netdev_fix_features(struct net_device *dev,
7076 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07007077{
Michał Mirosław57422dc2011-01-22 12:14:12 +00007078 /* Fix illegal checksum combinations */
7079 if ((features & NETIF_F_HW_CSUM) &&
7080 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007081 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00007082 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
7083 }
7084
Herbert Xub63365a2008-10-23 01:11:29 -07007085 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00007086 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007087 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00007088 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07007089 }
7090
Pravin B Shelarec5f0612013-03-07 09:28:01 +00007091 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
7092 !(features & NETIF_F_IP_CSUM)) {
7093 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
7094 features &= ~NETIF_F_TSO;
7095 features &= ~NETIF_F_TSO_ECN;
7096 }
7097
7098 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
7099 !(features & NETIF_F_IPV6_CSUM)) {
7100 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
7101 features &= ~NETIF_F_TSO6;
7102 }
7103
Alexander Duyckb1dc4972016-05-02 09:38:24 -07007104 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
7105 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
7106 features &= ~NETIF_F_TSO_MANGLEID;
7107
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00007108 /* TSO ECN requires that TSO is present as well. */
7109 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
7110 features &= ~NETIF_F_TSO_ECN;
7111
Michał Mirosław212b5732011-02-15 16:59:16 +00007112 /* Software GSO depends on SG. */
7113 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007114 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00007115 features &= ~NETIF_F_GSO;
7116 }
7117
Michał Mirosławacd11302011-01-24 15:45:15 -08007118 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07007119 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00007120 /* maybe split UFO into V4 and V6? */
Tom Herbertc8cd0982015-12-14 11:19:44 -08007121 if (!(features & NETIF_F_HW_CSUM) &&
7122 ((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
7123 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007124 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08007125 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07007126 features &= ~NETIF_F_UFO;
7127 }
7128
7129 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04007130 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08007131 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07007132 features &= ~NETIF_F_UFO;
7133 }
7134 }
7135
Alexander Duyck802ab552016-04-10 21:45:03 -04007136 /* GSO partial features require GSO partial be set */
7137 if ((features & dev->gso_partial_features) &&
7138 !(features & NETIF_F_GSO_PARTIAL)) {
7139 netdev_dbg(dev,
7140 "Dropping partially supported GSO features since no GSO partial.\n");
7141 features &= ~dev->gso_partial_features;
7142 }
7143
Herbert Xub63365a2008-10-23 01:11:29 -07007144 return features;
7145}
Herbert Xub63365a2008-10-23 01:11:29 -07007146
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007147int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00007148{
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007149 struct net_device *upper, *lower;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007150 netdev_features_t features;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007151 struct list_head *iter;
Jarod Wilsone7868a82015-11-03 23:09:32 -05007152 int err = -1;
Michał Mirosław5455c692011-02-15 16:59:17 +00007153
Michał Mirosław87267482011-04-12 09:56:38 +00007154 ASSERT_RTNL();
7155
Michał Mirosław5455c692011-02-15 16:59:17 +00007156 features = netdev_get_wanted_features(dev);
7157
7158 if (dev->netdev_ops->ndo_fix_features)
7159 features = dev->netdev_ops->ndo_fix_features(dev, features);
7160
7161 /* driver might be less strict about feature dependencies */
7162 features = netdev_fix_features(dev, features);
7163
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007164	/* some features can't be enabled if they're off on an upper device */
7165 netdev_for_each_upper_dev_rcu(dev, upper, iter)
7166 features = netdev_sync_upper_features(dev, upper, features);
7167
Michał Mirosław5455c692011-02-15 16:59:17 +00007168 if (dev->features == features)
Jarod Wilsone7868a82015-11-03 23:09:32 -05007169 goto sync_lower;
Michał Mirosław5455c692011-02-15 16:59:17 +00007170
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007171 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
7172 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00007173
7174 if (dev->netdev_ops->ndo_set_features)
7175 err = dev->netdev_ops->ndo_set_features(dev, features);
Nikolay Aleksandrov5f8dc332015-11-13 14:54:01 +01007176 else
7177 err = 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00007178
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007179 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00007180 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007181 "set_features() failed (%d); wanted %pNF, left %pNF\n",
7182 err, &features, &dev->features);
Nikolay Aleksandrov17b85d22015-11-17 15:49:06 +01007183 /* return non-0 since some features might have changed and
7184 * it's better to fire a spurious notification than miss it
7185 */
7186 return -1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007187 }
7188
Jarod Wilsone7868a82015-11-03 23:09:32 -05007189sync_lower:
Jarod Wilsonfd867d52015-11-02 21:55:59 -05007190 /* some features must be disabled on lower devices when disabled
7191 * on an upper device (think: bonding master or bridge)
7192 */
7193 netdev_for_each_lower_dev(dev, lower, iter)
7194 netdev_sync_lower_features(dev, lower, features);
7195
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007196 if (!err)
7197 dev->features = features;
7198
Jarod Wilsone7868a82015-11-03 23:09:32 -05007199 return err < 0 ? 0 : 1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007200}
7201
Michał Mirosławafe12cc2011-05-07 03:22:17 +00007202/**
7203 * netdev_update_features - recalculate device features
7204 * @dev: the device to check
7205 *
7206 * Recalculate dev->features set and send notifications if it
7207 * has changed. Should be called after driver or hardware dependent
7208 * conditions might have changed that influence the features.
7209 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007210void netdev_update_features(struct net_device *dev)
7211{
7212 if (__netdev_update_features(dev))
7213 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00007214}
7215EXPORT_SYMBOL(netdev_update_features);
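
/*
 * Illustrative sketch, not part of this file: a driver that has just
 * learned of a new hardware constraint (say, a firmware event turning
 * off checksum offload) adjusts its masks and recomputes under RTNL;
 * notifications fire only if dev->features actually changed.
 *
 *	dev->hw_features &= ~NETIF_F_HW_CSUM;
 *	netdev_update_features(dev);
 */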
7216
Linus Torvalds1da177e2005-04-16 15:20:36 -07007217/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00007218 * netdev_change_features - recalculate device features
7219 * @dev: the device to check
7220 *
7221 * Recalculate dev->features set and send notifications even
7222 * if they have not changed. Should be called instead of
7223 * netdev_update_features() if also dev->vlan_features might
7224 * have changed to allow the changes to be propagated to stacked
7225 * VLAN devices.
7226 */
7227void netdev_change_features(struct net_device *dev)
7228{
7229 __netdev_update_features(dev);
7230 netdev_features_change(dev);
7231}
7232EXPORT_SYMBOL(netdev_change_features);
7233
7234/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007235 * netif_stacked_transfer_operstate - transfer operstate
7236 * @rootdev: the root or lower level device to transfer state from
7237 * @dev: the device to transfer operstate to
7238 *
7239 * Transfer operational state from root to device. This is normally
7240 * called when a stacking relationship exists between the root
7241 * device and the device (a leaf device).
7242 */
7243void netif_stacked_transfer_operstate(const struct net_device *rootdev,
7244 struct net_device *dev)
7245{
7246 if (rootdev->operstate == IF_OPER_DORMANT)
7247 netif_dormant_on(dev);
7248 else
7249 netif_dormant_off(dev);
7250
Zhang Shengju0575c862017-04-26 17:49:38 +08007251 if (netif_carrier_ok(rootdev))
7252 netif_carrier_on(dev);
7253 else
7254 netif_carrier_off(dev);
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007255}
7256EXPORT_SYMBOL(netif_stacked_transfer_operstate);
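
/*
 * Illustrative sketch, not part of this file: a VLAN or macvlan device
 * mirrors the carrier and dormant state of its lower device, typically
 * from a NETDEV_CHANGE notifier:
 *
 *	netif_stacked_transfer_operstate(lowerdev, vlandev);
 */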
7257
Michael Daltona953be52014-01-16 22:23:28 -08007258#ifdef CONFIG_SYSFS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007259static int netif_alloc_rx_queues(struct net_device *dev)
7260{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007261 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00007262 struct netdev_rx_queue *rx;
Pankaj Gupta10595902015-01-12 11:41:28 +05307263 size_t sz = count * sizeof(*rx);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007264
Tom Herbertbd25fa72010-10-18 18:00:16 +00007265 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007266
Pankaj Gupta10595902015-01-12 11:41:28 +05307267 rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
7268 if (!rx) {
7269 rx = vzalloc(sz);
7270 if (!rx)
7271 return -ENOMEM;
7272 }
Tom Herbertbd25fa72010-10-18 18:00:16 +00007273 dev->_rx = rx;
7274
Tom Herbertbd25fa72010-10-18 18:00:16 +00007275 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00007276 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007277 return 0;
7278}
Tom Herbertbf264142010-11-26 08:36:09 +00007279#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00007280
Changli Gaoaa942102010-12-04 02:31:41 +00007281static void netdev_init_one_queue(struct net_device *dev,
7282 struct netdev_queue *queue, void *_unused)
7283{
7284 /* Initialize queue lock */
7285 spin_lock_init(&queue->_xmit_lock);
7286 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
7287 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00007288 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00007289 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00007290#ifdef CONFIG_BQL
7291 dql_init(&queue->dql, HZ);
7292#endif
Changli Gaoaa942102010-12-04 02:31:41 +00007293}
7294
Eric Dumazet60877a32013-06-20 01:15:51 -07007295static void netif_free_tx_queues(struct net_device *dev)
7296{
WANG Cong4cb28972014-06-02 15:55:22 -07007297 kvfree(dev->_tx);
Eric Dumazet60877a32013-06-20 01:15:51 -07007298}
7299
Tom Herberte6484932010-10-18 18:04:39 +00007300static int netif_alloc_netdev_queues(struct net_device *dev)
7301{
7302 unsigned int count = dev->num_tx_queues;
7303 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07007304 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00007305
Eric Dumazetd3397272015-07-06 17:13:26 +02007306 if (count < 1 || count > 0xffff)
7307 return -EINVAL;
Tom Herberte6484932010-10-18 18:04:39 +00007308
Eric Dumazet60877a32013-06-20 01:15:51 -07007309 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
7310 if (!tx) {
7311 tx = vzalloc(sz);
7312 if (!tx)
7313 return -ENOMEM;
7314 }
Tom Herberte6484932010-10-18 18:04:39 +00007315 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00007316
Tom Herberte6484932010-10-18 18:04:39 +00007317 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
7318 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00007319
7320 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00007321}
7322
Denys Vlasenkoa2029242015-05-11 21:17:53 +02007323void netif_tx_stop_all_queues(struct net_device *dev)
7324{
7325 unsigned int i;
7326
7327 for (i = 0; i < dev->num_tx_queues; i++) {
7328 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
tchardingf4563a72017-02-09 17:56:07 +11007329
Denys Vlasenkoa2029242015-05-11 21:17:53 +02007330 netif_tx_stop_queue(txq);
7331 }
7332}
7333EXPORT_SYMBOL(netif_tx_stop_all_queues);
7334
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08007335/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007336 * register_netdevice - register a network device
7337 * @dev: device to register
7338 *
7339 * Take a completed network device structure and add it to the kernel
7340 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7341 * chain. 0 is returned on success. A negative errno code is returned
7342 * on a failure to set up the device, or if the name is a duplicate.
7343 *
7344 * Callers must hold the rtnl semaphore. You may want
7345 * register_netdev() instead of this.
7346 *
7347 * BUGS:
7348 * The locking appears insufficient to guarantee two parallel registers
7349 * will not get the same name.
7350 */
7351
7352int register_netdevice(struct net_device *dev)
7353{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007354 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007355 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007356
7357 BUG_ON(dev_boot_phase);
7358 ASSERT_RTNL();
7359
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007360 might_sleep();
7361
Linus Torvalds1da177e2005-04-16 15:20:36 -07007362 /* When net_device's are persistent, this will be fatal. */
7363 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007364 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007365
David S. Millerf1f28aa2008-07-15 00:08:33 -07007366 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07007367 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007368
Gao feng828de4f2012-09-13 20:58:27 +00007369 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00007370 if (ret < 0)
7371 goto out;
7372
Linus Torvalds1da177e2005-04-16 15:20:36 -07007373 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007374 if (dev->netdev_ops->ndo_init) {
7375 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007376 if (ret) {
7377 if (ret > 0)
7378 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08007379 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007380 }
7381 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007382
Patrick McHardyf6469682013-04-19 02:04:27 +00007383 if (((dev->hw_features | dev->features) &
7384 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00007385 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
7386 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
7387 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
7388 ret = -EINVAL;
7389 goto err_uninit;
7390 }
7391
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00007392 ret = -EBUSY;
7393 if (!dev->ifindex)
7394 dev->ifindex = dev_new_index(net);
7395 else if (__dev_get_by_index(net, dev->ifindex))
7396 goto err_uninit;
7397
Michał Mirosław5455c692011-02-15 16:59:17 +00007398 /* Transfer changeable features to wanted_features and enable
7399 * software offloads (GSO and GRO).
7400 */
7401 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00007402 dev->features |= NETIF_F_SOFT_FEATURES;
7403 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007404
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007405 if (!(dev->flags & IFF_LOOPBACK))
Michał Mirosław34324dc2011-11-15 15:29:55 +00007406 dev->hw_features |= NETIF_F_NOCACHE_COPY;
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007407
Alexander Duyck7f348a62016-04-20 16:51:00 -04007408 /* If IPv4 TCP segmentation offload is supported we should also
7409 * allow the device to enable segmenting the frame with the option
7410 * of ignoring a static IP ID value. This doesn't enable the
7411 * feature itself but allows the user to enable it later.
7412 */
Alexander Duyckcbc53e02016-04-10 21:44:51 -04007413 if (dev->hw_features & NETIF_F_TSO)
7414 dev->hw_features |= NETIF_F_TSO_MANGLEID;
Alexander Duyck7f348a62016-04-20 16:51:00 -04007415 if (dev->vlan_features & NETIF_F_TSO)
7416 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
7417 if (dev->mpls_features & NETIF_F_TSO)
7418 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
7419 if (dev->hw_enc_features & NETIF_F_TSO)
7420 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07007421
Michał Mirosław1180e7d2011-07-14 14:41:11 -07007422 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00007423 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07007424 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00007425
Pravin B Shelaree579672013-03-07 09:28:08 +00007426 /* Make NETIF_F_SG inheritable to tunnel devices.
7427 */
Alexander Duyck802ab552016-04-10 21:45:03 -04007428 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
Pravin B Shelaree579672013-03-07 09:28:08 +00007429
Simon Horman0d89d202013-05-23 21:02:52 +00007430 /* Make NETIF_F_SG inheritable to MPLS.
7431 */
7432 dev->mpls_features |= NETIF_F_SG;
7433
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00007434 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
7435 ret = notifier_to_errno(ret);
7436 if (ret)
7437 goto err_uninit;
7438
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007439 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007440 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007441 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007442 dev->reg_state = NETREG_REGISTERED;
7443
Michał Mirosław6cb6a272011-04-02 22:48:47 -07007444 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00007445
Linus Torvalds1da177e2005-04-16 15:20:36 -07007446 /*
7447 * Default initial state at registry is that the
7448 * device is present.
7449 */
7450
7451 set_bit(__LINK_STATE_PRESENT, &dev->state);
7452
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01007453 linkwatch_init_dev(dev);
7454
Linus Torvalds1da177e2005-04-16 15:20:36 -07007455 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007456 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007457 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04007458 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007459
Jiri Pirko948b3372013-01-08 01:38:25 +00007460 /* If the device has permanent device address, driver should
7461 * set dev_addr and also addr_assign_type should be set to
7462 * NET_ADDR_PERM (default value).
7463 */
7464 if (dev->addr_assign_type == NET_ADDR_PERM)
7465 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7466
Linus Torvalds1da177e2005-04-16 15:20:36 -07007467 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007468 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07007469 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07007470 if (ret) {
7471 rollback_registered(dev);
7472 dev->reg_state = NETREG_UNREGISTERED;
7473 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007474 /*
7475 * Prevent userspace races by waiting until the network
7476 * device is fully setup before sending notifications.
7477 */
Patrick McHardya2835762010-02-26 06:34:51 +00007478 if (!dev->rtnl_link_ops ||
7479 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007480 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007481
7482out:
7483 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007484
7485err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08007486 if (dev->netdev_ops->ndo_uninit)
7487 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07007488 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007489}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007490EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007491
7492/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007493 * init_dummy_netdev - init a dummy network device for NAPI
7494 * @dev: device to init
7495 *
7496 * This takes a network device structure and initializes the minimum
7497 * number of fields so it can be used to schedule NAPI polls without
7498 * registering a full blown interface. This is to be used by drivers
7499 * that need to tie several hardware interfaces to a single NAPI
7500 * poll scheduler due to HW limitations.
7501 */
7502int init_dummy_netdev(struct net_device *dev)
7503{
7504 /* Clear everything. Note we don't initialize spinlocks
7505	 * as they aren't supposed to be taken by any of the
7506 * NAPI code and this dummy netdev is supposed to be
7507 * only ever used for NAPI polls
7508 */
7509 memset(dev, 0, sizeof(struct net_device));
7510
7511 /* make sure we BUG if trying to hit standard
7512 * register/unregister code path
7513 */
7514 dev->reg_state = NETREG_DUMMY;
7515
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007516 /* NAPI wants this */
7517 INIT_LIST_HEAD(&dev->napi_list);
7518
7519 /* a dummy interface is started by default */
7520 set_bit(__LINK_STATE_PRESENT, &dev->state);
7521 set_bit(__LINK_STATE_START, &dev->state);
7522
Eric Dumazet29b44332010-10-11 10:22:12 +00007523	/* Note: We don't allocate pcpu_refcnt for dummy devices,
7524	 * because users of this 'device' don't need to change
7525 * its refcount.
7526 */
7527
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08007528 return 0;
7529}
7530EXPORT_SYMBOL_GPL(init_dummy_netdev);
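
/*
 * Illustrative sketch, not part of this file: a driver multiplexing
 * several hardware channels onto one NAPI context can embed a dummy
 * netdev purely as a NAPI anchor. 'adapter', 'my_poll' and the weight
 * are hypothetical.
 *
 *	init_dummy_netdev(&adapter->napi_dev);
 *	netif_napi_add(&adapter->napi_dev, &adapter->napi, my_poll, 64);
 */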
7531
7532
7533/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007534 * register_netdev - register a network device
7535 * @dev: device to register
7536 *
7537 * Take a completed network device structure and add it to the kernel
7538 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7539 * chain. 0 is returned on success. A negative errno code is returned
7540 * on a failure to set up the device, or if the name is a duplicate.
7541 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07007542 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07007543 * and expands the device name if you passed a format string to
7544 * alloc_netdev.
7545 */
7546int register_netdev(struct net_device *dev)
7547{
7548 int err;
7549
7550 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007551 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007552 rtnl_unlock();
7553 return err;
7554}
7555EXPORT_SYMBOL(register_netdev);
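/*
 * A usage sketch (assumed names "my_priv" and "my_setup"): the common
 * allocate/register pair; a "%d" in the name is expanded here because
 * register_netdev() takes the rtnl lock for the caller.
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "mydev%d",
 *			   NET_NAME_UNKNOWN, my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */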
7556
Eric Dumazet29b44332010-10-11 10:22:12 +00007557int netdev_refcnt_read(const struct net_device *dev)
7558{
7559 int i, refcnt = 0;
7560
7561 for_each_possible_cpu(i)
7562 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
7563 return refcnt;
7564}
7565EXPORT_SYMBOL(netdev_refcnt_read);
7566
Ben Hutchings2c530402012-07-10 10:55:09 +00007567/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007568 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00007569 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07007570 *
7571 * This is called when unregistering network devices.
7572 *
7573 * Any protocol or device that holds a reference should register
7574 * for netdevice notification, and clean up and put back the
7575 * reference if they receive an UNREGISTER event.
7576 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007577 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007578 */
7579static void netdev_wait_allrefs(struct net_device *dev)
7580{
7581 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00007582 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007583
Eric Dumazete014deb2009-11-17 05:59:21 +00007584 linkwatch_forget_dev(dev);
7585
Linus Torvalds1da177e2005-04-16 15:20:36 -07007586 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00007587 refcnt = netdev_refcnt_read(dev);
7588
7589 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007590 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08007591 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007592
7593 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07007594 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007595
Eric Dumazet748e2d92012-08-22 21:50:59 +00007596 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007597 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00007598 rtnl_lock();
7599
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007600 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007601 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
7602 &dev->state)) {
7603 /* We must not have linkwatch events
7604 * pending on unregister. If this
7605 * happens, we simply run the queue
7606 * unscheduled, resulting in a noop
7607 * for this device.
7608 */
7609 linkwatch_run_queue();
7610 }
7611
Stephen Hemminger6756ae42006-03-20 22:23:58 -08007612 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007613
7614 rebroadcast_time = jiffies;
7615 }
7616
7617 msleep(250);
7618
Eric Dumazet29b44332010-10-11 10:22:12 +00007619 refcnt = netdev_refcnt_read(dev);
7620
Linus Torvalds1da177e2005-04-16 15:20:36 -07007621 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007622 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
7623 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007624 warning_time = jiffies;
7625 }
7626 }
7627}
7628
7629/* The sequence is:
7630 *
7631 * rtnl_lock();
7632 * ...
7633 * register_netdevice(x1);
7634 * register_netdevice(x2);
7635 * ...
7636 * unregister_netdevice(y1);
7637 * unregister_netdevice(y2);
7638 * ...
7639 * rtnl_unlock();
7640 * free_netdev(y1);
7641 * free_netdev(y2);
7642 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07007643 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07007644 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007645 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07007646 * without deadlocking with linkwatch via keventd.
7647 * 2) Since we run with the RTNL semaphore not held, we can sleep
7648 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07007649 *
7650 * We must not return until all unregister events added during
7651 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007652 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007653void netdev_run_todo(void)
7654{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007655 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007656
Linus Torvalds1da177e2005-04-16 15:20:36 -07007657 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007658 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07007659
7660 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07007661
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007662
7663 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00007664 if (!list_empty(&list))
7665 rcu_barrier();
7666
Linus Torvalds1da177e2005-04-16 15:20:36 -07007667 while (!list_empty(&list)) {
7668 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00007669 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007670 list_del(&dev->todo_list);
7671
Eric Dumazet748e2d92012-08-22 21:50:59 +00007672 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007673 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00007674 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007675
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007676 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007677 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007678 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007679 dump_stack();
7680 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007681 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007682
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007683 dev->reg_state = NETREG_UNREGISTERED;
7684
7685 netdev_wait_allrefs(dev);
7686
7687 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00007688 BUG_ON(netdev_refcnt_read(dev));
Salam Noureddine7866a622015-01-27 11:35:48 -08007689 BUG_ON(!list_empty(&dev->ptype_all));
7690 BUG_ON(!list_empty(&dev->ptype_specific));
Eric Dumazet33d480c2011-08-11 19:30:52 +00007691 WARN_ON(rcu_access_pointer(dev->ip_ptr));
7692 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07007693 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007694
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007695 if (dev->destructor)
7696 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007697
Eric W. Biederman50624c92013-09-23 21:19:49 -07007698 /* Report a network device has been unregistered */
7699 rtnl_lock();
7700 dev_net(dev)->dev_unreg_count--;
7701 __rtnl_unlock();
7702 wake_up(&netdev_unregistering_wq);
7703
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007704 /* Free network device */
7705 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007706 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007707}
7708
Jarod Wilson92566452016-02-01 18:51:04 -05007709/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
7710 * all the same fields in the same order as net_device_stats, with only
7711 * the type differing, but rtnl_link_stats64 may have additional fields
7712 * at the end for newer counters.
Ben Hutchings3cfde792010-07-09 09:11:52 +00007713 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007714void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7715 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00007716{
7717#if BITS_PER_LONG == 64
Jarod Wilson92566452016-02-01 18:51:04 -05007718 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007719 memcpy(stats64, netdev_stats, sizeof(*stats64));
Jarod Wilson92566452016-02-01 18:51:04 -05007720 /* zero out counters that only exist in rtnl_link_stats64 */
7721 memset((char *)stats64 + sizeof(*netdev_stats), 0,
7722 sizeof(*stats64) - sizeof(*netdev_stats));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007723#else
Jarod Wilson92566452016-02-01 18:51:04 -05007724 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
Ben Hutchings3cfde792010-07-09 09:11:52 +00007725 const unsigned long *src = (const unsigned long *)netdev_stats;
7726 u64 *dst = (u64 *)stats64;
7727
Jarod Wilson92566452016-02-01 18:51:04 -05007728 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007729 for (i = 0; i < n; i++)
7730 dst[i] = src[i];
Jarod Wilson92566452016-02-01 18:51:04 -05007731 /* zero out counters that only exist in rtnl_link_stats64 */
7732 memset((char *)stats64 + n * sizeof(u64), 0,
7733 sizeof(*stats64) - n * sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007734#endif
7735}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007736EXPORT_SYMBOL(netdev_stats_to_stats64);
Ben Hutchings3cfde792010-07-09 09:11:52 +00007737
Eric Dumazetd83345a2009-11-16 03:36:51 +00007738/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007739 * dev_get_stats - get network device statistics
7740 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07007741 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007742 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00007743 * Get network statistics from device. Return @storage.
7744 * The device driver may provide its own method by setting
7745 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
7746 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007747 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00007748struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7749 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00007750{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007751 const struct net_device_ops *ops = dev->netdev_ops;
7752
Eric Dumazet28172732010-07-07 14:58:56 -07007753 if (ops->ndo_get_stats64) {
7754 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007755 ops->ndo_get_stats64(dev, storage);
7756 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00007757 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007758 } else {
7759 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07007760 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007761 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet015f0682014-03-27 08:45:56 -07007762 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
Jarod Wilson6e7333d2016-02-01 18:51:05 -05007763 storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
Eric Dumazet28172732010-07-07 14:58:56 -07007764 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07007765}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007766EXPORT_SYMBOL(dev_get_stats);
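/*
 * A usage sketch: callers pass an on-stack rtnl_link_stats64 and read
 * the aggregated counters from it afterwards.
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: rx %llu tx %llu\n", dev->name,
 *		(unsigned long long)stats.rx_packets,
 *		(unsigned long long)stats.tx_packets);
 */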
Rusty Russellc45d2862007-03-28 14:29:08 -07007767
Eric Dumazet24824a02010-10-02 06:11:55 +00007768struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07007769{
Eric Dumazet24824a02010-10-02 06:11:55 +00007770 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07007771
Eric Dumazet24824a02010-10-02 06:11:55 +00007772#ifdef CONFIG_NET_CLS_ACT
7773 if (queue)
7774 return queue;
7775 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7776 if (!queue)
7777 return NULL;
7778 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08007779 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
Eric Dumazet24824a02010-10-02 06:11:55 +00007780 queue->qdisc_sleeping = &noop_qdisc;
7781 rcu_assign_pointer(dev->ingress_queue, queue);
7782#endif
7783 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07007784}
7785
Eric Dumazet2c60db02012-09-16 09:17:26 +00007786static const struct ethtool_ops default_ethtool_ops;
7787
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00007788void netdev_set_default_ethtool_ops(struct net_device *dev,
7789 const struct ethtool_ops *ops)
7790{
7791 if (dev->ethtool_ops == &default_ethtool_ops)
7792 dev->ethtool_ops = ops;
7793}
7794EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
7795
Eric Dumazet74d332c2013-10-30 13:10:44 -07007796void netdev_freemem(struct net_device *dev)
7797{
7798 char *addr = (char *)dev - dev->padded;
7799
WANG Cong4cb28972014-06-02 15:55:22 -07007800 kvfree(addr);
Eric Dumazet74d332c2013-10-30 13:10:44 -07007801}
7802
Linus Torvalds1da177e2005-04-16 15:20:36 -07007803/**
tcharding722c9a02017-02-09 17:56:04 +11007804 * alloc_netdev_mqs - allocate network device
7805 * @sizeof_priv: size of private data to allocate space for
7806 * @name: device name format string
7807 * @name_assign_type: origin of device name
7808 * @setup: callback to initialize device
7809 * @txqs: the number of TX subqueues to allocate
7810 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07007811 *
tcharding722c9a02017-02-09 17:56:04 +11007812 * Allocates a struct net_device with private data area for driver use
7813 * and performs basic initialization. Also allocates subqueue structs
7814 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007815 */
Tom Herbert36909ea2011-01-09 19:36:31 +00007816struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02007817 unsigned char name_assign_type,
Tom Herbert36909ea2011-01-09 19:36:31 +00007818 void (*setup)(struct net_device *),
7819 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007820{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007821 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07007822 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007823 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007824
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07007825 BUG_ON(strlen(name) >= sizeof(dev->name));
7826
Tom Herbert36909ea2011-01-09 19:36:31 +00007827 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007828 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00007829 return NULL;
7830 }
7831
Michael Daltona953be52014-01-16 22:23:28 -08007832#ifdef CONFIG_SYSFS
Tom Herbert36909ea2011-01-09 19:36:31 +00007833 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007834 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00007835 return NULL;
7836 }
7837#endif
7838
David S. Millerfd2ea0a2008-07-17 01:56:23 -07007839 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07007840 if (sizeof_priv) {
7841 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007842 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07007843 alloc_size += sizeof_priv;
7844 }
7845 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007846 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007847
Eric Dumazet74d332c2013-10-30 13:10:44 -07007848 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
7849 if (!p)
7850 p = vzalloc(alloc_size);
Joe Perches62b59422013-02-04 16:48:16 +00007851 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007852 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007853
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007854 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007855 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007856
Eric Dumazet29b44332010-10-11 10:22:12 +00007857 dev->pcpu_refcnt = alloc_percpu(int);
7858 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07007859 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007860
Linus Torvalds1da177e2005-04-16 15:20:36 -07007861 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00007862 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007863
Jiri Pirko22bedad32010-04-01 21:22:57 +00007864 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007865 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00007866
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09007867 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007868
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07007869 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00007870 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007871
Herbert Xud565b0a2008-12-15 23:38:52 -08007872 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00007873 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07007874 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00007875 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007876 INIT_LIST_HEAD(&dev->adj_list.upper);
7877 INIT_LIST_HEAD(&dev->adj_list.lower);
Salam Noureddine7866a622015-01-27 11:35:48 -08007878 INIT_LIST_HEAD(&dev->ptype_all);
7879 INIT_LIST_HEAD(&dev->ptype_specific);
Jiri Kosina59cc1f62016-08-10 11:05:15 +02007880#ifdef CONFIG_NET_SCHED
7881 hash_init(dev->qdisc_hash);
7882#endif
Eric Dumazet02875872014-10-05 18:38:35 -07007883 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007884 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007885
Phil Suttera8131042016-02-17 15:37:43 +01007886 if (!dev->tx_queue_len) {
Phil Sutterf84bb1e2015-08-27 21:21:36 +02007887 dev->priv_flags |= IFF_NO_QUEUE;
Jesper Dangaard Brouer11597082016-11-03 14:56:06 +01007888 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
Phil Suttera8131042016-02-17 15:37:43 +01007889 }
Phil Sutter906470c2015-08-18 10:30:48 +02007890
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007891 dev->num_tx_queues = txqs;
7892 dev->real_num_tx_queues = txqs;
7893 if (netif_alloc_netdev_queues(dev))
7894 goto free_all;
7895
Michael Daltona953be52014-01-16 22:23:28 -08007896#ifdef CONFIG_SYSFS
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007897 dev->num_rx_queues = rxqs;
7898 dev->real_num_rx_queues = rxqs;
7899 if (netif_alloc_rx_queues(dev))
7900 goto free_all;
7901#endif
7902
Linus Torvalds1da177e2005-04-16 15:20:36 -07007903 strcpy(dev->name, name);
Tom Gundersenc835a672014-07-14 16:37:24 +02007904 dev->name_assign_type = name_assign_type;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00007905 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00007906 if (!dev->ethtool_ops)
7907 dev->ethtool_ops = &default_ethtool_ops;
Pablo Neirae687ad62015-05-13 18:19:38 +02007908
7909 nf_hook_ingress_init(dev);
7910
Linus Torvalds1da177e2005-04-16 15:20:36 -07007911 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007912
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007913free_all:
7914 free_netdev(dev);
7915 return NULL;
7916
Eric Dumazet29b44332010-10-11 10:22:12 +00007917free_pcpu:
7918 free_percpu(dev->pcpu_refcnt);
Eric Dumazet74d332c2013-10-30 13:10:44 -07007919free_dev:
7920 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007921 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007922}
Tom Herbert36909ea2011-01-09 19:36:31 +00007923EXPORT_SYMBOL(alloc_netdev_mqs);
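/*
 * A usage sketch (assumed names "my_priv" and "my_setup"): a multiqueue
 * device requesting eight TX and eight RX queues; single-queue callers
 * normally go through the alloc_netdev() or alloc_etherdev() wrappers,
 * which pass 1 for both counts.
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "myeth%d",
 *			       NET_NAME_ENUM, my_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */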
Linus Torvalds1da177e2005-04-16 15:20:36 -07007924
7925/**
tcharding722c9a02017-02-09 17:56:04 +11007926 * free_netdev - free network device
7927 * @dev: device
Linus Torvalds1da177e2005-04-16 15:20:36 -07007928 *
tcharding722c9a02017-02-09 17:56:04 +11007929 * This function does the last stage of destroying an allocated device
7930 * interface. The reference to the device object is released. If this
7931 * is the last reference then it will be freed. Must be called in process
7932 * context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007933 */
7934void free_netdev(struct net_device *dev)
7935{
Herbert Xud565b0a2008-12-15 23:38:52 -08007936 struct napi_struct *p, *n;
David S. Millerb5cdae32017-04-18 15:36:58 -04007937 struct bpf_prog *prog;
Herbert Xud565b0a2008-12-15 23:38:52 -08007938
Eric Dumazet93d05d42015-11-18 06:31:03 -08007939 might_sleep();
Eric Dumazet60877a32013-06-20 01:15:51 -07007940 netif_free_tx_queues(dev);
Michael Daltona953be52014-01-16 22:23:28 -08007941#ifdef CONFIG_SYSFS
Pankaj Gupta10595902015-01-12 11:41:28 +05307942 kvfree(dev->_rx);
Tom Herbertfe822242010-11-09 10:47:38 +00007943#endif
David S. Millere8a04642008-07-17 00:34:19 -07007944
Eric Dumazet33d480c2011-08-11 19:30:52 +00007945 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00007946
Jiri Pirkof001fde2009-05-05 02:48:28 +00007947 /* Flush device addresses */
7948 dev_addr_flush(dev);
7949
Herbert Xud565b0a2008-12-15 23:38:52 -08007950 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
7951 netif_napi_del(p);
7952
Eric Dumazet29b44332010-10-11 10:22:12 +00007953 free_percpu(dev->pcpu_refcnt);
7954 dev->pcpu_refcnt = NULL;
7955
David S. Millerb5cdae32017-04-18 15:36:58 -04007956 prog = rcu_dereference_protected(dev->xdp_prog, 1);
7957 if (prog) {
7958 bpf_prog_put(prog);
7959 static_key_slow_dec(&generic_xdp_needed);
7960 }
7961
Stephen Hemminger3041a062006-05-26 13:25:24 -07007962 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007963 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07007964 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007965 return;
7966 }
7967
7968 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
7969 dev->reg_state = NETREG_RELEASED;
7970
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07007971 /* will free via device release */
7972 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007973}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007974EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007975
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007976/**
7977 * synchronize_net - Synchronize with packet receive processing
7978 *
7979 * Wait for packets currently being received to be done.
7980 * Does not block later packets from starting.
7981 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007982void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007983{
7984 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00007985 if (rtnl_is_locked())
7986 synchronize_rcu_expedited();
7987 else
7988 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007989}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007990EXPORT_SYMBOL(synchronize_net);
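/*
 * A usage sketch of the unpublish-then-free pattern this helper backs
 * ("some_ptr" is a hypothetical RCU-protected field): once
 * synchronize_net() returns, no receive-path reader can still see the
 * old object.
 *
 *	old = rtnl_dereference(dev->some_ptr);
 *	RCU_INIT_POINTER(dev->some_ptr, NULL);
 *	synchronize_net();
 *	kfree(old);
 */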
Linus Torvalds1da177e2005-04-16 15:20:36 -07007991
7992/**
Eric Dumazet44a08732009-10-27 07:03:04 +00007993 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07007994 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00007995 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08007996 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007997 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08007998 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00007999 * If head not NULL, device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008000 *
8001 * Callers must hold the rtnl semaphore. You may want
8002 * unregister_netdev() instead of this.
8003 */
8004
Eric Dumazet44a08732009-10-27 07:03:04 +00008005void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008006{
Herbert Xua6620712007-12-12 19:21:56 -08008007 ASSERT_RTNL();
8008
Eric Dumazet44a08732009-10-27 07:03:04 +00008009 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00008010 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00008011 } else {
8012 rollback_registered(dev);
8013 /* Finish processing unregister after unlock */
8014 net_set_todo(dev);
8015 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008016}
Eric Dumazet44a08732009-10-27 07:03:04 +00008017EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008018
8019/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008020 * unregister_netdevice_many - unregister many devices
8021 * @head: list of devices
Eric Dumazet87757a92014-06-06 06:44:03 -07008022 *
8023 * Note: As most callers use a stack-allocated list_head,
8024 * we force a list_del() to make sure the stack won't be corrupted later.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008025 */
8026void unregister_netdevice_many(struct list_head *head)
8027{
8028 struct net_device *dev;
8029
8030 if (!list_empty(head)) {
8031 rollback_registered_many(head);
8032 list_for_each_entry(dev, head, unreg_list)
8033 net_set_todo(dev);
Eric Dumazet87757a92014-06-06 06:44:03 -07008034 list_del(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008035 }
8036}
Eric Dumazet63c80992009-10-27 07:06:49 +00008037EXPORT_SYMBOL(unregister_netdevice_many);
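/*
 * A usage sketch (the predicate "my_owns" is hypothetical): queue several
 * devices under one rtnl hold and tear them down in a single batch, the
 * same shape default_device_exit_batch() uses below.
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	for_each_netdev_safe(net, dev, tmp)
 *		if (my_owns(dev))
 *			unregister_netdevice_queue(dev, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */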
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008038
8039/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008040 * unregister_netdev - remove device from the kernel
8041 * @dev: device
8042 *
8043 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08008044 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008045 *
8046 * This is just a wrapper for unregister_netdevice that takes
8047 * the rtnl semaphore. In general you want to use this and not
8048 * unregister_netdevice.
8049 */
8050void unregister_netdev(struct net_device *dev)
8051{
8052 rtnl_lock();
8053 unregister_netdevice(dev);
8054 rtnl_unlock();
8055}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008056EXPORT_SYMBOL(unregister_netdev);
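/*
 * A usage sketch: the teardown mirror of register_netdev().
 * free_netdev() runs after the unregister completes and, as noted
 * above, must be called from process context.
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */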
8057
Eric W. Biedermance286d32007-09-12 13:53:49 +02008058/**
8059 * dev_change_net_namespace - move device to a different network namespace
8060 * @dev: device
8061 * @net: network namespace
8062 * @pat: If not NULL name pattern to try if the current device name
8063 * is already taken in the destination network namespace.
8064 *
8065 * This function shuts down a device interface and moves it
8066 * to a new network namespace. On success 0 is returned, on
8067 * a failure a negative errno code is returned.
8068 *
8069 * Callers must hold the rtnl semaphore.
8070 */
8071
8072int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
8073{
Eric W. Biedermance286d32007-09-12 13:53:49 +02008074 int err;
8075
8076 ASSERT_RTNL();
8077
8078 /* Don't allow namespace local devices to be moved. */
8079 err = -EINVAL;
8080 if (dev->features & NETIF_F_NETNS_LOCAL)
8081 goto out;
8082
8083 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02008084 if (dev->reg_state != NETREG_REGISTERED)
8085 goto out;
8086
8087 /* Get out if there is nothing to do */
8088 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09008089 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02008090 goto out;
8091
8092 /* Pick the destination device name, and ensure
8093 * we can use it in the destination network namespace.
8094 */
8095 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00008096 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008097 /* We get here if we can't use the current device name */
8098 if (!pat)
8099 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00008100 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02008101 goto out;
8102 }
8103
8104 /*
8105 * And now a mini version of register_netdevice and unregister_netdevice.
8106 */
8107
8108 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07008109 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008110
8111 /* And unlink it from device chain */
8112 err = -ENODEV;
8113 unlist_netdevice(dev);
8114
8115 synchronize_net();
8116
8117 /* Shutdown queueing discipline. */
8118 dev_shutdown(dev);
8119
8120 /* Notify protocols that we are about to destroy
tchardingeb13da12017-02-09 17:56:06 +11008121 * this device. They should clean all the things.
8122 *
8123 * Note that dev->reg_state stays at NETREG_REGISTERED.
8124 * This is wanted because this way 8021q and macvlan know
8125 * the device is just moving and can keep their slaves up.
8126 */
Eric W. Biedermance286d32007-09-12 13:53:49 +02008127 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00008128 rcu_barrier();
8129 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07008130 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008131
8132 /*
8133 * Flush the unicast and multicast chains
8134 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00008135 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00008136 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008137
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008138 /* Send a netdev-removed uevent to the old namespace */
8139 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04008140 netdev_adjacent_del_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008141
Eric W. Biedermance286d32007-09-12 13:53:49 +02008142 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09008143 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008144
Eric W. Biedermance286d32007-09-12 13:53:49 +02008145 /* If there is an ifindex conflict assign a new one */
Nicolas Dichtel7a66bbc2015-04-02 17:07:09 +02008146 if (__dev_get_by_index(net, dev->ifindex))
Eric W. Biedermance286d32007-09-12 13:53:49 +02008147 dev->ifindex = dev_new_index(net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008148
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008149 /* Send a netdev-add uevent to the new namespace */
8150 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04008151 netdev_adjacent_add_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00008152
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008153 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07008154 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008155 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008156
8157 /* Add the device back in the hashes */
8158 list_netdevice(dev);
8159
8160 /* Notify protocols that a new device appeared. */
8161 call_netdevice_notifiers(NETDEV_REGISTER, dev);
8162
Eric W. Biedermand90a9092009-12-12 22:11:15 +00008163 /*
8164 * Prevent userspace races by waiting until the network
8165 * device is fully setup before sending notifications.
8166 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07008167 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00008168
Eric W. Biedermance286d32007-09-12 13:53:49 +02008169 synchronize_net();
8170 err = 0;
8171out:
8172 return err;
8173}
Johannes Berg463d0182009-07-14 00:33:35 +02008174EXPORT_SYMBOL_GPL(dev_change_net_namespace);
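/*
 * A usage sketch: move a device into another namespace, supplying a
 * fallback pattern in case its current name is already taken there.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, net, "dev%d");
 *	rtnl_unlock();
 */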
Eric W. Biedermance286d32007-09-12 13:53:49 +02008175
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008176static int dev_cpu_dead(unsigned int oldcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008177{
8178 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008179 struct sk_buff *skb;
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008180 unsigned int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008181 struct softnet_data *sd, *oldsd;
8182
Linus Torvalds1da177e2005-04-16 15:20:36 -07008183 local_irq_disable();
8184 cpu = smp_processor_id();
8185 sd = &per_cpu(softnet_data, cpu);
8186 oldsd = &per_cpu(softnet_data, oldcpu);
8187
8188 /* Find end of our completion_queue. */
8189 list_skb = &sd->completion_queue;
8190 while (*list_skb)
8191 list_skb = &(*list_skb)->next;
8192 /* Append completion queue from offline CPU. */
8193 *list_skb = oldsd->completion_queue;
8194 oldsd->completion_queue = NULL;
8195
Linus Torvalds1da177e2005-04-16 15:20:36 -07008196 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00008197 if (oldsd->output_queue) {
8198 *sd->output_queue_tailp = oldsd->output_queue;
8199 sd->output_queue_tailp = oldsd->output_queue_tailp;
8200 oldsd->output_queue = NULL;
8201 oldsd->output_queue_tailp = &oldsd->output_queue;
8202 }
Eric Dumazetac64da02015-01-15 17:04:22 -08008203 /* Append NAPI poll list from offline CPU, with one exception :
8204 * process_backlog() must be called by cpu owning percpu backlog.
8205 * We properly handle process_queue & input_pkt_queue later.
8206 */
8207 while (!list_empty(&oldsd->poll_list)) {
8208 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
8209 struct napi_struct,
8210 poll_list);
8211
8212 list_del_init(&napi->poll_list);
8213 if (napi->poll == process_backlog)
8214 napi->state = 0;
8215 else
8216 ____napi_schedule(sd, napi);
Heiko Carstens264524d2011-06-06 20:50:03 +00008217 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008218
8219 raise_softirq_irqoff(NET_TX_SOFTIRQ);
8220 local_irq_enable();
8221
8222 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00008223 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08008224 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00008225 input_queue_head_incr(oldsd);
8226 }
Eric Dumazetac64da02015-01-15 17:04:22 -08008227 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08008228 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00008229 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07008230 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008231
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008232 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008233}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008234
Herbert Xu7f353bf2007-08-10 15:47:58 -07008235/**
Herbert Xub63365a2008-10-23 01:11:29 -07008236 * netdev_increment_features - increment feature set by one
8237 * @all: current feature set
8238 * @one: new feature set
8239 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07008240 *
8241 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07008242 * @one to the master device with current feature set @all. Will not
8243 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07008244 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00008245netdev_features_t netdev_increment_features(netdev_features_t all,
8246 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07008247{
Tom Herbertc8cd0982015-12-14 11:19:44 -08008248 if (mask & NETIF_F_HW_CSUM)
Tom Herberta1882222015-12-14 11:19:43 -08008249 mask |= NETIF_F_CSUM_MASK;
Michał Mirosław1742f182011-04-22 06:31:16 +00008250 mask |= NETIF_F_VLAN_CHALLENGED;
8251
Tom Herberta1882222015-12-14 11:19:43 -08008252 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
Michał Mirosław1742f182011-04-22 06:31:16 +00008253 all &= one | ~NETIF_F_ALL_FOR_ALL;
8254
Michał Mirosław1742f182011-04-22 06:31:16 +00008255 /* If one device supports hw checksumming, set for all. */
Tom Herbertc8cd0982015-12-14 11:19:44 -08008256 if (all & NETIF_F_HW_CSUM)
8257 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07008258
8259 return all;
8260}
Herbert Xub63365a2008-10-23 01:11:29 -07008261EXPORT_SYMBOL(netdev_increment_features);
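/*
 * A usage sketch (the slave list, seed value and "mask" are assumed, not
 * taken from any real driver): how a master driver such as bonding might
 * fold each slave's feature set into its own when recomputing features.
 *
 *	netdev_features_t features = NETIF_F_ONE_FOR_ALL;
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */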
Herbert Xu7f353bf2007-08-10 15:47:58 -07008262
Baruch Siach430f03c2013-06-02 20:43:55 +00008263static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008264{
8265 int i;
8266 struct hlist_head *hash;
8267
8268 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
8269 if (hash != NULL)
8270 for (i = 0; i < NETDEV_HASHENTRIES; i++)
8271 INIT_HLIST_HEAD(&hash[i]);
8272
8273 return hash;
8274}
8275
Eric W. Biederman881d9662007-09-17 11:56:21 -07008276/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07008277static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07008278{
Rustad, Mark D734b6542012-07-18 09:06:07 +00008279 if (net != &init_net)
8280 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07008281
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008282 net->dev_name_head = netdev_create_hash();
8283 if (net->dev_name_head == NULL)
8284 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008285
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008286 net->dev_index_head = netdev_create_hash();
8287 if (net->dev_index_head == NULL)
8288 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008289
8290 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07008291
8292err_idx:
8293 kfree(net->dev_name_head);
8294err_name:
8295 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008296}
8297
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008298/**
8299 * netdev_drivername - network driver for the device
8300 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008301 *
8302 * Determine network driver for device.
8303 */
David S. Miller3019de12011-06-06 16:41:33 -07008304const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07008305{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07008306 const struct device_driver *driver;
8307 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07008308 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07008309
8310 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008311 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07008312 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008313
8314 driver = parent->driver;
8315 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07008316 return driver->name;
8317 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07008318}
8319
Joe Perches6ea754e2014-09-22 11:10:50 -07008320static void __netdev_printk(const char *level, const struct net_device *dev,
8321 struct va_format *vaf)
Joe Perches256df2f2010-06-27 01:02:35 +00008322{
Joe Perchesb004ff42012-09-12 20:12:19 -07008323 if (dev && dev->dev.parent) {
Joe Perches6ea754e2014-09-22 11:10:50 -07008324 dev_printk_emit(level[1] - '0',
8325 dev->dev.parent,
8326 "%s %s %s%s: %pV",
8327 dev_driver_string(dev->dev.parent),
8328 dev_name(dev->dev.parent),
8329 netdev_name(dev), netdev_reg_state(dev),
8330 vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008331 } else if (dev) {
Joe Perches6ea754e2014-09-22 11:10:50 -07008332 printk("%s%s%s: %pV",
8333 level, netdev_name(dev), netdev_reg_state(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008334 } else {
Joe Perches6ea754e2014-09-22 11:10:50 -07008335 printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008336 }
Joe Perches256df2f2010-06-27 01:02:35 +00008337}
8338
Joe Perches6ea754e2014-09-22 11:10:50 -07008339void netdev_printk(const char *level, const struct net_device *dev,
8340 const char *format, ...)
Joe Perches256df2f2010-06-27 01:02:35 +00008341{
8342 struct va_format vaf;
8343 va_list args;
Joe Perches256df2f2010-06-27 01:02:35 +00008344
8345 va_start(args, format);
8346
8347 vaf.fmt = format;
8348 vaf.va = &args;
8349
Joe Perches6ea754e2014-09-22 11:10:50 -07008350 __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07008351
Joe Perches256df2f2010-06-27 01:02:35 +00008352 va_end(args);
Joe Perches256df2f2010-06-27 01:02:35 +00008353}
8354EXPORT_SYMBOL(netdev_printk);
8355
8356#define define_netdev_printk_level(func, level) \
Joe Perches6ea754e2014-09-22 11:10:50 -07008357void func(const struct net_device *dev, const char *fmt, ...) \
Joe Perches256df2f2010-06-27 01:02:35 +00008358{ \
Joe Perches256df2f2010-06-27 01:02:35 +00008359 struct va_format vaf; \
8360 va_list args; \
8361 \
8362 va_start(args, fmt); \
8363 \
8364 vaf.fmt = fmt; \
8365 vaf.va = &args; \
8366 \
Joe Perches6ea754e2014-09-22 11:10:50 -07008367 __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07008368 \
Joe Perches256df2f2010-06-27 01:02:35 +00008369 va_end(args); \
Joe Perches256df2f2010-06-27 01:02:35 +00008370} \
8371EXPORT_SYMBOL(func);
8372
8373define_netdev_printk_level(netdev_emerg, KERN_EMERG);
8374define_netdev_printk_level(netdev_alert, KERN_ALERT);
8375define_netdev_printk_level(netdev_crit, KERN_CRIT);
8376define_netdev_printk_level(netdev_err, KERN_ERR);
8377define_netdev_printk_level(netdev_warn, KERN_WARNING);
8378define_netdev_printk_level(netdev_notice, KERN_NOTICE);
8379define_netdev_printk_level(netdev_info, KERN_INFO);
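/*
 * A usage sketch ("status" is a hypothetical variable): these wrappers
 * prefix the message with the driver and device name plus registration
 * state, keeping call sites short.
 *
 *	netdev_warn(dev, "link lost, resetting (status %#x)\n", status);
 *	netdev_info(dev, "%u TX queues enabled\n",
 *		    dev->real_num_tx_queues);
 */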
8380
Pavel Emelyanov46650792007-10-08 20:38:39 -07008381static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07008382{
8383 kfree(net->dev_name_head);
8384 kfree(net->dev_index_head);
8385}
8386
Denis V. Lunev022cbae2007-11-13 03:23:50 -08008387static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07008388 .init = netdev_init,
8389 .exit = netdev_exit,
8390};
8391
Pavel Emelyanov46650792007-10-08 20:38:39 -07008392static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02008393{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008394 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02008395 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008396 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02008397 * initial network namespace
8398 */
8399 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008400 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008401 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008402 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02008403
8404 /* Ignore unmovable devices (e.g. loopback) */
8405 if (dev->features & NETIF_F_NETNS_LOCAL)
8406 continue;
8407
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00008408 /* Leave virtual devices for the generic cleanup */
8409 if (dev->rtnl_link_ops)
8410 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08008411
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008412 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008413 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
8414 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02008415 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008416 pr_emerg("%s: failed to move %s to init_net: %d\n",
8417 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07008418 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02008419 }
8420 }
8421 rtnl_unlock();
8422}
8423
Eric W. Biederman50624c92013-09-23 21:19:49 -07008424static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
8425{
8426 /* Return with the rtnl_lock held when there are no network
8427 * devices unregistering in any network namespace in net_list.
8428 */
8429 struct net *net;
8430 bool unregistering;
Peter Zijlstraff960a72014-10-29 17:04:56 +01008431 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008432
Peter Zijlstraff960a72014-10-29 17:04:56 +01008433 add_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008434 for (;;) {
Eric W. Biederman50624c92013-09-23 21:19:49 -07008435 unregistering = false;
8436 rtnl_lock();
8437 list_for_each_entry(net, net_list, exit_list) {
8438 if (net->dev_unreg_count > 0) {
8439 unregistering = true;
8440 break;
8441 }
8442 }
8443 if (!unregistering)
8444 break;
8445 __rtnl_unlock();
Peter Zijlstraff960a72014-10-29 17:04:56 +01008446
8447 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008448 }
Peter Zijlstraff960a72014-10-29 17:04:56 +01008449 remove_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008450}
8451
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008452static void __net_exit default_device_exit_batch(struct list_head *net_list)
8453{
8454 /* At exit, all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04008455 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008456 * Do this across as many network namespaces as possible to
8457 * improve batching efficiency.
8458 */
8459 struct net_device *dev;
8460 struct net *net;
8461 LIST_HEAD(dev_kill_list);
8462
Eric W. Biederman50624c92013-09-23 21:19:49 -07008463 /* To prevent network device cleanup code from dereferencing
8464 * loopback devices or network devices that have been freed,
8465 * wait here for all pending unregistrations to complete
8466 * before unregistering the loopback device and allowing the
8467 * network namespace to be freed.
8468 *
8469 * The netdev todo list containing all network devices
8470 * unregistrations that happen in default_device_exit_batch
8471 * will run in the rtnl_unlock() at the end of
8472 * default_device_exit_batch.
8473 */
8474 rtnl_lock_unregistering(net_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008475 list_for_each_entry(net, net_list, exit_list) {
8476 for_each_netdev_reverse(net, dev) {
Jiri Pirkob0ab2fa2014-06-26 09:58:25 +02008477 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008478 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
8479 else
8480 unregister_netdevice_queue(dev, &dev_kill_list);
8481 }
8482 }
8483 unregister_netdevice_many(&dev_kill_list);
8484 rtnl_unlock();
8485}
8486
Denis V. Lunev022cbae2007-11-13 03:23:50 -08008487static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02008488 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00008489 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02008490};
8491
Linus Torvalds1da177e2005-04-16 15:20:36 -07008492/*
8493 * Initialize the DEV module. At boot time this walks the device list and
8494 * unhooks any devices that fail to initialise (normally hardware not
8495 * present) and leaves us with a valid list of present and active devices.
8496 *
8497 */
8498
8499/*
8500 * This is called single threaded during boot, so no need
8501 * to take the rtnl semaphore.
8502 */
8503static int __init net_dev_init(void)
8504{
8505 int i, rc = -ENOMEM;
8506
8507 BUG_ON(!dev_boot_phase);
8508
Linus Torvalds1da177e2005-04-16 15:20:36 -07008509 if (dev_proc_init())
8510 goto out;
8511
Eric W. Biederman8b41d182007-09-26 22:02:53 -07008512 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07008513 goto out;
8514
8515 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08008516 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008517 INIT_LIST_HEAD(&ptype_base[i]);
8518
Vlad Yasevich62532da2012-11-15 08:49:10 +00008519 INIT_LIST_HEAD(&offload_base);
8520
Eric W. Biederman881d9662007-09-17 11:56:21 -07008521 if (register_pernet_subsys(&netdev_net_ops))
8522 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008523
8524 /*
8525 * Initialise the packet receive queues.
8526 */
8527
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07008528 for_each_possible_cpu(i) {
Eric Dumazet41852492016-08-26 12:50:39 -07008529 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008530 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008531
Eric Dumazet41852492016-08-26 12:50:39 -07008532 INIT_WORK(flush, flush_backlog);
8533
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008534 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07008535 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008536 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00008537 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00008538#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008539 sd->csd.func = rps_trigger_softirq;
8540 sd->csd.info = sd;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008541 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07008542#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00008543
Eric Dumazete36fa2f2010-04-19 21:17:14 +00008544 sd->backlog.poll = process_backlog;
8545 sd->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008546 }
8547
Linus Torvalds1da177e2005-04-16 15:20:36 -07008548 dev_boot_phase = 0;
8549
Eric W. Biederman505d4f72008-11-07 22:54:20 -08008550 /* The loopback device is special: if any other network device
8551 * is present in a network namespace, the loopback device must
8552 * be present. Since we now dynamically allocate and free the
8553 * loopback device, ensure this invariant is maintained by
8554 * keeping the loopback device as the first device on the
8555 * list of network devices. This ensures that the loopback device
8556 * is the first device that appears and the last network device
8557 * that disappears.
8558 */
8559 if (register_pernet_device(&loopback_net_ops))
8560 goto out;
8561
8562 if (register_pernet_device(&default_device_ops))
8563 goto out;
8564
Carlos R. Mafra962cf362008-05-15 11:15:37 -03008565 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
8566 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008567
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +01008568 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
8569 NULL, dev_cpu_dead);
8570 WARN_ON(rc < 0);
Thomas Graff38a9eb2015-07-21 10:43:56 +02008571 dst_subsys_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008572 rc = 0;
8573out:
8574 return rc;
8575}
8576
8577subsys_initcall(net_dev_init);