/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
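
/* Usage sketch (illustrative only, not part of this file): a hypothetical
 * module tapping every protocol.  "my_rcv" and "my_ptype" are assumed
 * names; the handler owns the skb and must free or forward it, and the
 * registration must be paired with dev_remove_pack() before unload.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);
 *	...
 *	dev_remove_pack(&my_ptype);
 */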

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
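
/* Example (illustrative): the matching kernel command line option takes up
 * to four comma-separated integers (irq, base_addr, mem_start, mem_end)
 * followed by the interface name, e.g.
 *
 *	netdev=5,0x300,0,0,eth0
 *
 * netdev_boot_setup_check() then applies the stored irq and base address
 * when a device named "eth0" is probed.
 */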

/*******************************************************************************

		Device Interface Subroutines

*******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the caller to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
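
/* Lookup pattern (illustrative): a caller that only needs the device
 * inside the read-side critical section can skip refcounting.  The
 * pointer is valid only until rcu_read_unlock().
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "lo");
 *	if (dev)
 *		mtu = dev->mtu;
 *	rcu_read_unlock();
 */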

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
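
/* Refcounted pattern (illustrative): the returned device may be used
 * outside any lock but must be released with dev_put().
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */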

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
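
/* Usage sketch (illustrative): find an Ethernet device by MAC address.
 * The result must not be dereferenced after rcu_read_unlock() unless a
 * reference was taken with dev_hold().
 *
 *	static const char mac[ETH_ALEN] = {
 *		0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *	rcu_read_unlock();
 */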

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
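
/* Naming sketch (illustrative): a driver typically passes a format
 * string and lets the core pick the first free unit number.  With eth0
 * and eth1 already registered, this sets dev->name to "eth2" and
 * returns 2.
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;
 */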

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}
1254
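/* Illustrative sketch (not part of dev.c): a caller that already holds
 * RTNL setting a human-readable alias. The alias string is an arbitrary
 * example value; on success the number of copied bytes is returned, and
 * a zero length frees any existing alias.
 */
static int __maybe_unused example_set_alias(struct net_device *dev)
{
	static const char alias[] = "uplink0";

	ASSERT_RTNL();
	return dev_set_alias(dev, alias, strlen(alias));
}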
1255
1256/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001257 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001258 * @dev: device to cause notification
1259 *
1260 * Called to indicate a device has changed features.
1261 */
1262void netdev_features_change(struct net_device *dev)
1263{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001264 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001265}
1266EXPORT_SYMBOL(netdev_features_change);
1267
1268/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 * netdev_state_change - device changes state
1270 * @dev: device to cause notification
1271 *
1272 * Called to indicate a device has changed state. This function calls
1273 * the notifier chains for netdev_chain and sends a NEWLINK message
1274 * to the routing socket.
1275 */
1276void netdev_state_change(struct net_device *dev)
1277{
1278 if (dev->flags & IFF_UP) {
Loic Prylli5495119462014-07-01 21:39:43 -07001279 struct netdev_notifier_change_info change_info;
1280
1281 change_info.flags_changed = 0;
1282 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1283 &change_info.info);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001284 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 }
1286}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001287EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288
Amerigo Wangee89bab2012-08-09 22:14:56 +00001289/**
1290 * netdev_notify_peers - notify network peers about existence of @dev
1291 * @dev: network device
1292 *
1293 * Generate traffic such that interested network peers are aware of
1294 * @dev, such as by generating a gratuitous ARP. This may be used when
1295 * a device wants to inform the rest of the network about some sort of
1296 * reconfiguration such as a failover event or virtual machine
1297 * migration.
1298 */
1299void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001300{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001301 rtnl_lock();
1302 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1303 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001304}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001305EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001306
Patrick McHardybd380812010-02-26 06:34:53 +00001307static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001309 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001310 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001312 ASSERT_RTNL();
1313
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 if (!netif_device_present(dev))
1315 return -ENODEV;
1316
Neil Hormanca99ca12013-02-05 08:05:43 +00001317 /* Block netpoll from trying to do any rx path servicing.
1318 * If we don't do this, there is a chance ndo_poll_controller
1319 * or ndo_poll may be running while we open the device
1320 */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001321 netpoll_poll_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001322
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001323 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1324 ret = notifier_to_errno(ret);
1325 if (ret)
1326 return ret;
1327
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001329
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001330 if (ops->ndo_validate_addr)
1331 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001332
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001333 if (!ret && ops->ndo_open)
1334 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335
Eric W. Biederman66b55522014-03-27 15:39:03 -07001336 netpoll_poll_enable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001337
Jeff Garzikbada3392007-10-23 20:19:37 -07001338 if (ret)
1339 clear_bit(__LINK_STATE_START, &dev->state);
1340 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 dev->flags |= IFF_UP;
Patrick McHardy4417da62007-06-27 01:28:10 -07001342 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001344 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001346
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 return ret;
1348}
Patrick McHardybd380812010-02-26 06:34:53 +00001349
1350/**
1351 * dev_open - prepare an interface for use.
1352 * @dev: device to open
1353 *
1354 * Takes a device from down to up state. The device's private open
1355 * function is invoked and then the multicast lists are loaded. Finally
1356 * the device is moved into the up state and a %NETDEV_UP message is
1357 * sent to the netdev notifier chain.
1358 *
1359 * Calling this function on an active interface is a nop. On a failure
1360 * a negative errno code is returned.
1361 */
1362int dev_open(struct net_device *dev)
1363{
1364 int ret;
1365
Patrick McHardybd380812010-02-26 06:34:53 +00001366 if (dev->flags & IFF_UP)
1367 return 0;
1368
Patrick McHardybd380812010-02-26 06:34:53 +00001369 ret = __dev_open(dev);
1370 if (ret < 0)
1371 return ret;
1372
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001373 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Patrick McHardybd380812010-02-26 06:34:53 +00001374 call_netdevice_notifiers(NETDEV_UP, dev);
1375
1376 return ret;
1377}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001378EXPORT_SYMBOL(dev_open);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379
Octavian Purdila44345722010-12-13 12:44:07 +00001380static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381{
Octavian Purdila44345722010-12-13 12:44:07 +00001382 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001383
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001384 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001385 might_sleep();
1386
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001387 list_for_each_entry(dev, head, close_list) {
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001388 /* Temporarily disable netpoll until the interface is down */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001389 netpoll_poll_disable(dev);
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001390
Octavian Purdila44345722010-12-13 12:44:07 +00001391 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392
Octavian Purdila44345722010-12-13 12:44:07 +00001393 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394
Octavian Purdila44345722010-12-13 12:44:07 +00001395 /* Synchronize to scheduled poll. We cannot touch poll list, it
1396 * can even be on a different cpu. So just clear netif_running().
1397 *
1398 * dev->stop() will invoke napi_disable() on all of its
1399 * napi_struct instances on this device.
1400 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001401 smp_mb__after_atomic(); /* Commit netif_running(). */
Octavian Purdila44345722010-12-13 12:44:07 +00001402 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403
Octavian Purdila44345722010-12-13 12:44:07 +00001404 dev_deactivate_many(head);
1405
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001406 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001407 const struct net_device_ops *ops = dev->netdev_ops;
1408
1409 /*
1410 * Call the device specific close. This cannot fail.
1411 * Only if device is UP
1412 *
1413 * We allow it to be called even after a DETACH hot-plug
1414 * event.
1415 */
1416 if (ops->ndo_stop)
1417 ops->ndo_stop(dev);
1418
Octavian Purdila44345722010-12-13 12:44:07 +00001419 dev->flags &= ~IFF_UP;
Eric W. Biederman66b55522014-03-27 15:39:03 -07001420 netpoll_poll_enable(dev);
Octavian Purdila44345722010-12-13 12:44:07 +00001421 }
1422
1423 return 0;
1424}
1425
1426static int __dev_close(struct net_device *dev)
1427{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001428 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001429 LIST_HEAD(single);
1430
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001431 list_add(&dev->close_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001432 retval = __dev_close_many(&single);
1433 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001434
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001435 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001436}
1437
David S. Miller99c4a262015-03-18 22:52:33 -04001438int dev_close_many(struct list_head *head, bool unlink)
Octavian Purdila44345722010-12-13 12:44:07 +00001439{
1440 struct net_device *dev, *tmp;
Octavian Purdila44345722010-12-13 12:44:07 +00001441
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001442 /* Remove the devices that don't need to be closed */
1443 list_for_each_entry_safe(dev, tmp, head, close_list)
Octavian Purdila44345722010-12-13 12:44:07 +00001444 if (!(dev->flags & IFF_UP))
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001445 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001446
1447 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001448
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001449 list_for_each_entry_safe(dev, tmp, head, close_list) {
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001450 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Octavian Purdila44345722010-12-13 12:44:07 +00001451 call_netdevice_notifiers(NETDEV_DOWN, dev);
David S. Miller99c4a262015-03-18 22:52:33 -04001452 if (unlink)
1453 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001454 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 return 0;
1457}
David S. Miller99c4a262015-03-18 22:52:33 -04001458EXPORT_SYMBOL(dev_close_many);
Patrick McHardybd380812010-02-26 06:34:53 +00001459
1460/**
1461 * dev_close - shutdown an interface.
1462 * @dev: device to shutdown
1463 *
1464 * This function moves an active device into down state. A
1465 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1466 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1467 * chain.
1468 */
1469int dev_close(struct net_device *dev)
1470{
Eric Dumazete14a5992011-05-10 12:26:06 -07001471 if (dev->flags & IFF_UP) {
1472 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001473
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001474 list_add(&dev->close_list, &single);
David S. Miller99c4a262015-03-18 22:52:33 -04001475 dev_close_many(&single, true);
Eric Dumazete14a5992011-05-10 12:26:06 -07001476 list_del(&single);
1477 }
dingtianhongda6e3782013-05-27 19:53:31 +00001478 return 0;
Patrick McHardybd380812010-02-26 06:34:53 +00001479}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001480EXPORT_SYMBOL(dev_close);
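/* Illustrative sketch (not part of dev.c): cycling an interface down and
 * back up from process context. Both calls require RTNL and are no-ops
 * when the device is already in the requested state.
 */
static int __maybe_unused example_restart_device(struct net_device *dev)
{
	int err;

	rtnl_lock();
	dev_close(dev);		/* always returns 0 */
	err = dev_open(dev);	/* sends NETDEV_UP on success */
	rtnl_unlock();
	return err;
}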
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481
1482
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001483/**
1484 * dev_disable_lro - disable Large Receive Offload on a device
1485 * @dev: device
1486 *
1487 * Disable Large Receive Offload (LRO) on a net device. Must be
1488 * called under RTNL. This is needed if received packets may be
1489 * forwarded to another interface.
1490 */
1491void dev_disable_lro(struct net_device *dev)
1492{
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001493 struct net_device *lower_dev;
1494 struct list_head *iter;
Michal Kubeček529d0482013-11-15 06:18:50 +01001495
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001496 dev->wanted_features &= ~NETIF_F_LRO;
1497 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001498
Michał Mirosław22d59692011-04-21 12:42:15 +00001499 if (unlikely(dev->features & NETIF_F_LRO))
1500 netdev_WARN(dev, "failed to disable LRO!\n");
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001501
1502 netdev_for_each_lower_dev(dev, lower_dev, iter)
1503 dev_disable_lro(lower_dev);
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001504}
1505EXPORT_SYMBOL(dev_disable_lro);
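/* Illustrative sketch (not part of dev.c): a forwarding setup, e.g. a
 * bridge-like master enslaving a port, disables LRO on the port; the
 * recursion above then covers its stacked lower devices as well.
 */
static void __maybe_unused example_prepare_forwarding_port(struct net_device *port)
{
	ASSERT_RTNL();
	dev_disable_lro(port);
}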
1506
Jiri Pirko351638e2013-05-28 01:30:21 +00001507static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1508 struct net_device *dev)
1509{
1510 struct netdev_notifier_info info;
1511
1512 netdev_notifier_info_init(&info, dev);
1513 return nb->notifier_call(nb, val, &info);
1514}
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001515
Eric W. Biederman881d9662007-09-17 11:56:21 -07001516static int dev_boot_phase = 1;
1517
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518/**
1519 * register_netdevice_notifier - register a network notifier block
1520 * @nb: notifier
1521 *
1522 * Register a notifier to be called when network device events occur.
1523 * The notifier passed is linked into the kernel structures and must
1524 * not be reused until it has been unregistered. A negative errno code
1525 * is returned on a failure.
1526 *
1527 * When registered, all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001528 * to the new notifier, giving it a race-free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 * view of the network device list.
1530 */
1531
1532int register_netdevice_notifier(struct notifier_block *nb)
1533{
1534 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001535 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001536 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 int err;
1538
1539 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001540 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001541 if (err)
1542 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001543 if (dev_boot_phase)
1544 goto unlock;
1545 for_each_net(net) {
1546 for_each_netdev(net, dev) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001547 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001548 err = notifier_to_errno(err);
1549 if (err)
1550 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551
Eric W. Biederman881d9662007-09-17 11:56:21 -07001552 if (!(dev->flags & IFF_UP))
1553 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001554
Jiri Pirko351638e2013-05-28 01:30:21 +00001555 call_netdevice_notifier(nb, NETDEV_UP, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001556 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001558
1559unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 rtnl_unlock();
1561 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001562
1563rollback:
1564 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001565 for_each_net(net) {
1566 for_each_netdev(net, dev) {
1567 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001568 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001569
Eric W. Biederman881d9662007-09-17 11:56:21 -07001570 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001571 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1572 dev);
1573 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001574 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001575 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001576 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001577 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001578
RongQing.Li8f891482011-11-30 23:43:07 -05001579outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001580 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001581 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001583EXPORT_SYMBOL(register_netdevice_notifier);
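/* Illustrative sketch (not part of dev.c): a module-local notifier built
 * on the API above. Because registration replays NETDEV_REGISTER and
 * NETDEV_UP, the handler also sees devices that existed beforehand. The
 * handler and block names are hypothetical.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_info("%s is about to go down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb __maybe_unused = {
	.notifier_call = example_netdev_event,
};

/* A module would call register_netdevice_notifier(&example_netdev_nb) at
 * init time and unregister_netdevice_notifier(&example_netdev_nb) on exit.
 */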
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584
1585/**
1586 * unregister_netdevice_notifier - unregister a network notifier block
1587 * @nb: notifier
1588 *
1589 * Unregister a notifier previously registered by
1590 * register_netdevice_notifier(). The notifier is unlinked from the
1591 * kernel structures and may then be reused. A negative errno code
1592 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001593 *
1594 * After unregistering, unregister and down device events are synthesized
1595 * for all devices on the device list to the removed notifier to remove
1596 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 */
1598
1599int unregister_netdevice_notifier(struct notifier_block *nb)
1600{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001601 struct net_device *dev;
1602 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001603 int err;
1604
1605 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001606 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001607 if (err)
1608 goto unlock;
1609
1610 for_each_net(net) {
1611 for_each_netdev(net, dev) {
1612 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001613 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1614 dev);
1615 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001616 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001617 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001618 }
1619 }
1620unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001621 rtnl_unlock();
1622 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001624EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
1626/**
Jiri Pirko351638e2013-05-28 01:30:21 +00001627 * call_netdevice_notifiers_info - call all network notifier blocks
1628 * @val: value passed unmodified to notifier function
1629 * @dev: net_device pointer passed unmodified to notifier function
1630 * @info: notifier information data
1631 *
1632 * Call all network notifier blocks. Parameters and return value
1633 * are as for raw_notifier_call_chain().
1634 */
1635
stephen hemminger1d143d92013-12-29 14:01:29 -08001636static int call_netdevice_notifiers_info(unsigned long val,
1637 struct net_device *dev,
1638 struct netdev_notifier_info *info)
Jiri Pirko351638e2013-05-28 01:30:21 +00001639{
1640 ASSERT_RTNL();
1641 netdev_notifier_info_init(info, dev);
1642 return raw_notifier_call_chain(&netdev_chain, val, info);
1643}
Jiri Pirko351638e2013-05-28 01:30:21 +00001644
1645/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 * call_netdevice_notifiers - call all network notifier blocks
1647 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001648 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649 *
1650 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001651 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 */
1653
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001654int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655{
Jiri Pirko351638e2013-05-28 01:30:21 +00001656 struct netdev_notifier_info info;
1657
1658 return call_netdevice_notifiers_info(val, dev, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001660EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661
Pablo Neira1cf519002015-05-13 18:19:37 +02001662#ifdef CONFIG_NET_INGRESS
Daniel Borkmann45771392015-04-10 23:07:54 +02001663static struct static_key ingress_needed __read_mostly;
1664
1665void net_inc_ingress_queue(void)
1666{
1667 static_key_slow_inc(&ingress_needed);
1668}
1669EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1670
1671void net_dec_ingress_queue(void)
1672{
1673 static_key_slow_dec(&ingress_needed);
1674}
1675EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1676#endif
1677
Ingo Molnarc5905af2012-02-24 08:31:31 +01001678static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001679#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001680/* We are not allowed to call static_key_slow_dec() from irq context
Eric Dumazetb90e5792011-11-28 11:16:50 +00001681 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001682 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001683 */
1684static atomic_t netstamp_needed_deferred;
1685#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686
1687void net_enable_timestamp(void)
1688{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001689#ifdef HAVE_JUMP_LABEL
1690 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1691
1692 if (deferred) {
1693 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001694 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001695 return;
1696 }
1697#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001698 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001700EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
1702void net_disable_timestamp(void)
1703{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001704#ifdef HAVE_JUMP_LABEL
1705 if (in_interrupt()) {
1706 atomic_inc(&netstamp_needed_deferred);
1707 return;
1708 }
1709#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001710 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001712EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713
Eric Dumazet3b098e22010-05-15 23:57:10 -07001714static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715{
Eric Dumazet588f0332011-11-15 04:12:55 +00001716 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001717 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001718 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719}
1720
Eric Dumazet588f0332011-11-15 04:12:55 +00001721#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001722 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001723 if ((COND) && !(SKB)->tstamp.tv64) \
1724 __net_timestamp(SKB); \
1725 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001726
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04001727bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001728{
1729 unsigned int len;
1730
1731 if (!(dev->flags & IFF_UP))
1732 return false;
1733
1734 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1735 if (skb->len <= len)
1736 return true;
1737
1738 /* if TSO is enabled, we don't care about the length as the packet
1739 * could be forwarded without being segmented first
1740 */
1741 if (skb_is_gso(skb))
1742 return true;
1743
1744 return false;
1745}
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04001746EXPORT_SYMBOL_GPL(is_skb_forwardable);
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001747
Herbert Xua0265d22014-04-17 13:45:03 +08001748int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1749{
Willem de Bruijnbbbf2df2015-06-08 11:53:08 -04001750 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
1751 unlikely(!is_skb_forwardable(dev, skb))) {
Herbert Xua0265d22014-04-17 13:45:03 +08001752 atomic_long_inc(&dev->rx_dropped);
1753 kfree_skb(skb);
1754 return NET_RX_DROP;
1755 }
1756
1757 skb_scrub_packet(skb, true);
WANG Cong08b4b8e2015-03-20 14:29:09 -07001758 skb->priority = 0;
Herbert Xua0265d22014-04-17 13:45:03 +08001759 skb->protocol = eth_type_trans(skb, dev);
Jay Vosburgh2c26d342014-12-19 15:32:00 -08001760 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
Herbert Xua0265d22014-04-17 13:45:03 +08001761
1762 return 0;
1763}
1764EXPORT_SYMBOL_GPL(__dev_forward_skb);
1765
Arnd Bergmann44540962009-11-26 06:07:08 +00001766/**
1767 * dev_forward_skb - loopback an skb to another netif
1768 *
1769 * @dev: destination network device
1770 * @skb: buffer to forward
1771 *
1772 * return values:
1773 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001774 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001775 *
1776 * dev_forward_skb can be used for injecting an skb from the
1777 * start_xmit function of one device into the receive queue
1778 * of another device.
1779 *
1780 * The receiving device may be in another namespace, so
1781 * we have to clear all information in the skb that could
1782 * impact namespace isolation.
1783 */
1784int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1785{
Herbert Xua0265d22014-04-17 13:45:03 +08001786 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001787}
1788EXPORT_SYMBOL_GPL(dev_forward_skb);
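/* Illustrative sketch (not part of dev.c): a veth-like pairing in which
 * one device's transmit hook injects frames into its peer's receive
 * path. struct example_priv and the xmit hook are hypothetical.
 */
struct example_priv {
	struct net_device __rcu *peer;
};

static netdev_tx_t __maybe_unused example_xmit(struct sk_buff *skb,
					       struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (likely(peer))
		dev_forward_skb(peer, skb);	/* consumes skb either way */
	else
		kfree_skb(skb);
	rcu_read_unlock();
	return NETDEV_TX_OK;
}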
1789
Changli Gao71d9dec2010-12-15 19:57:25 +00001790static inline int deliver_skb(struct sk_buff *skb,
1791 struct packet_type *pt_prev,
1792 struct net_device *orig_dev)
1793{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001794 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1795 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001796 atomic_inc(&skb->users);
1797 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1798}
1799
Salam Noureddine7866a622015-01-27 11:35:48 -08001800static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1801 struct packet_type **pt,
Jiri Pirkofbcb2172015-03-30 16:56:01 +02001802 struct net_device *orig_dev,
1803 __be16 type,
Salam Noureddine7866a622015-01-27 11:35:48 -08001804 struct list_head *ptype_list)
1805{
1806 struct packet_type *ptype, *pt_prev = *pt;
1807
1808 list_for_each_entry_rcu(ptype, ptype_list, list) {
1809 if (ptype->type != type)
1810 continue;
1811 if (pt_prev)
Jiri Pirkofbcb2172015-03-30 16:56:01 +02001812 deliver_skb(skb, pt_prev, orig_dev);
Salam Noureddine7866a622015-01-27 11:35:48 -08001813 pt_prev = ptype;
1814 }
1815 *pt = pt_prev;
1816}
1817
Eric Leblondc0de08d2012-08-16 22:02:58 +00001818static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1819{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001820 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001821 return false;
1822
1823 if (ptype->id_match)
1824 return ptype->id_match(ptype, skb->sk);
1825 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1826 return true;
1827
1828 return false;
1829}
1830
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831/*
1832 * Support routine. Sends outgoing frames to any network
1833 * taps currently in use.
1834 */
1835
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001836static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837{
1838 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001839 struct sk_buff *skb2 = NULL;
1840 struct packet_type *pt_prev = NULL;
Salam Noureddine7866a622015-01-27 11:35:48 -08001841 struct list_head *ptype_list = &ptype_all;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001842
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843 rcu_read_lock();
Salam Noureddine7866a622015-01-27 11:35:48 -08001844again:
1845 list_for_each_entry_rcu(ptype, ptype_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 /* Never send packets back to the socket
1847 * they originated from - MvS (miquels@drinkel.ow.org)
1848 */
Salam Noureddine7866a622015-01-27 11:35:48 -08001849 if (skb_loop_sk(ptype, skb))
1850 continue;
Changli Gao71d9dec2010-12-15 19:57:25 +00001851
Salam Noureddine7866a622015-01-27 11:35:48 -08001852 if (pt_prev) {
1853 deliver_skb(skb2, pt_prev, skb->dev);
Changli Gao71d9dec2010-12-15 19:57:25 +00001854 pt_prev = ptype;
Salam Noureddine7866a622015-01-27 11:35:48 -08001855 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 }
Salam Noureddine7866a622015-01-27 11:35:48 -08001857
1858 /* need to clone skb, done only once */
1859 skb2 = skb_clone(skb, GFP_ATOMIC);
1860 if (!skb2)
1861 goto out_unlock;
1862
1863 net_timestamp_set(skb2);
1864
1865 /* skb->nh should be correctly
1866 * set by the sender, so that the second statement is
1867 * just protection against buggy protocols.
1868 */
1869 skb_reset_mac_header(skb2);
1870
1871 if (skb_network_header(skb2) < skb2->data ||
1872 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1873 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1874 ntohs(skb2->protocol),
1875 dev->name);
1876 skb_reset_network_header(skb2);
1877 }
1878
1879 skb2->transport_header = skb2->network_header;
1880 skb2->pkt_type = PACKET_OUTGOING;
1881 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 }
Salam Noureddine7866a622015-01-27 11:35:48 -08001883
1884 if (ptype_list == &ptype_all) {
1885 ptype_list = &dev->ptype_all;
1886 goto again;
1887 }
1888out_unlock:
Changli Gao71d9dec2010-12-15 19:57:25 +00001889 if (pt_prev)
1890 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 rcu_read_unlock();
1892}
1893
Ben Hutchings2c530402012-07-10 10:55:09 +00001894/**
1895 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001896 * @dev: Network device
1897 * @txq: number of queues available
1898 *
1899 * If real_num_tx_queues is changed the tc mappings may no longer be
1900 * valid. To resolve this verify the tc mapping remains valid and if
1901 * not, NULL the mapping. With no priorities mapping to this
1902 * offset/count pair it will no longer be used. In the worst case, if TC0
1903 * is invalid nothing can be done, so priority mappings are disabled. It is
1904 * expected that drivers will fix this mapping if they can before
1905 * calling netif_set_real_num_tx_queues.
1906 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001907static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001908{
1909 int i;
1910 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1911
1912 /* If TC0 is invalidated disable TC mapping */
1913 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001914 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001915 dev->num_tc = 0;
1916 return;
1917 }
1918
1919 /* Invalidated prio to tc mappings set to TC0 */
1920 for (i = 1; i < TC_BITMASK + 1; i++) {
1921 int q = netdev_get_prio_tc_map(dev, i);
1922
1923 tc = &dev->tc_to_txq[q];
1924 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001925 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1926 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001927 netdev_set_prio_tc_map(dev, i, 0);
1928 }
1929 }
1930}
1931
Alexander Duyck537c00d2013-01-10 08:57:02 +00001932#ifdef CONFIG_XPS
1933static DEFINE_MUTEX(xps_map_mutex);
1934#define xmap_dereference(P) \
1935 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1936
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001937static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1938 int cpu, u16 index)
1939{
1940 struct xps_map *map = NULL;
1941 int pos;
1942
1943 if (dev_maps)
1944 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1945
1946 for (pos = 0; map && pos < map->len; pos++) {
1947 if (map->queues[pos] == index) {
1948 if (map->len > 1) {
1949 map->queues[pos] = map->queues[--map->len];
1950 } else {
1951 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1952 kfree_rcu(map, rcu);
1953 map = NULL;
1954 }
1955 break;
1956 }
1957 }
1958
1959 return map;
1960}
1961
Alexander Duyck024e9672013-01-10 08:57:46 +00001962static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001963{
1964 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00001965 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001966 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001967
1968 mutex_lock(&xps_map_mutex);
1969 dev_maps = xmap_dereference(dev->xps_maps);
1970
1971 if (!dev_maps)
1972 goto out_no_maps;
1973
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001974 for_each_possible_cpu(cpu) {
Alexander Duyck024e9672013-01-10 08:57:46 +00001975 for (i = index; i < dev->num_tx_queues; i++) {
1976 if (!remove_xps_queue(dev_maps, cpu, i))
1977 break;
1978 }
1979 if (i == dev->num_tx_queues)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001980 active = true;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001981 }
1982
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001983 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00001984 RCU_INIT_POINTER(dev->xps_maps, NULL);
1985 kfree_rcu(dev_maps, rcu);
1986 }
1987
Alexander Duyck024e9672013-01-10 08:57:46 +00001988 for (i = index; i < dev->num_tx_queues; i++)
1989 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1990 NUMA_NO_NODE);
1991
Alexander Duyck537c00d2013-01-10 08:57:02 +00001992out_no_maps:
1993 mutex_unlock(&xps_map_mutex);
1994}
1995
Alexander Duyck01c5f862013-01-10 08:57:35 +00001996static struct xps_map *expand_xps_map(struct xps_map *map,
1997 int cpu, u16 index)
1998{
1999 struct xps_map *new_map;
2000 int alloc_len = XPS_MIN_MAP_ALLOC;
2001 int i, pos;
2002
2003 for (pos = 0; map && pos < map->len; pos++) {
2004 if (map->queues[pos] != index)
2005 continue;
2006 return map;
2007 }
2008
2009 /* Need to add queue to this CPU's existing map */
2010 if (map) {
2011 if (pos < map->alloc_len)
2012 return map;
2013
2014 alloc_len = map->alloc_len * 2;
2015 }
2016
2017 /* Need to allocate new map to store queue on this CPU's map */
2018 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2019 cpu_to_node(cpu));
2020 if (!new_map)
2021 return NULL;
2022
2023 for (i = 0; i < pos; i++)
2024 new_map->queues[i] = map->queues[i];
2025 new_map->alloc_len = alloc_len;
2026 new_map->len = pos;
2027
2028 return new_map;
2029}
2030
Michael S. Tsirkin35735402013-10-02 09:14:06 +03002031int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2032 u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00002033{
Alexander Duyck01c5f862013-01-10 08:57:35 +00002034 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002035 struct xps_map *map, *new_map;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002036 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002037 int cpu, numa_node_id = -2;
2038 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002039
2040 mutex_lock(&xps_map_mutex);
2041
2042 dev_maps = xmap_dereference(dev->xps_maps);
2043
Alexander Duyck01c5f862013-01-10 08:57:35 +00002044 /* allocate memory for queue storage */
2045 for_each_online_cpu(cpu) {
2046 if (!cpumask_test_cpu(cpu, mask))
2047 continue;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002048
Alexander Duyck01c5f862013-01-10 08:57:35 +00002049 if (!new_dev_maps)
2050 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002051 if (!new_dev_maps) {
2052 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002053 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002054 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002055
2056 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2057 NULL;
2058
2059 map = expand_xps_map(map, cpu, index);
2060 if (!map)
2061 goto error;
2062
2063 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2064 }
2065
2066 if (!new_dev_maps)
2067 goto out_no_new_maps;
2068
2069 for_each_possible_cpu(cpu) {
2070 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2071 /* add queue to CPU maps */
2072 int pos = 0;
2073
2074 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2075 while ((pos < map->len) && (map->queues[pos] != index))
2076 pos++;
2077
2078 if (pos == map->len)
2079 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002080#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00002081 if (numa_node_id == -2)
2082 numa_node_id = cpu_to_node(cpu);
2083 else if (numa_node_id != cpu_to_node(cpu))
2084 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002085#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00002086 } else if (dev_maps) {
2087 /* fill in the new device map from the old device map */
2088 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2089 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002090 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002091
Alexander Duyck537c00d2013-01-10 08:57:02 +00002092 }
2093
Alexander Duyck01c5f862013-01-10 08:57:35 +00002094 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2095
Alexander Duyck537c00d2013-01-10 08:57:02 +00002096 /* Cleanup old maps */
Alexander Duyck01c5f862013-01-10 08:57:35 +00002097 if (dev_maps) {
2098 for_each_possible_cpu(cpu) {
2099 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2100 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2101 if (map && map != new_map)
2102 kfree_rcu(map, rcu);
2103 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002104
Alexander Duyck537c00d2013-01-10 08:57:02 +00002105 kfree_rcu(dev_maps, rcu);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002106 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002107
Alexander Duyck01c5f862013-01-10 08:57:35 +00002108 dev_maps = new_dev_maps;
2109 active = true;
2110
2111out_no_new_maps:
2112 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00002113 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2114 (numa_node_id >= 0) ? numa_node_id :
2115 NUMA_NO_NODE);
2116
Alexander Duyck01c5f862013-01-10 08:57:35 +00002117 if (!dev_maps)
2118 goto out_no_maps;
2119
2120 /* removes queue from unused CPUs */
2121 for_each_possible_cpu(cpu) {
2122 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2123 continue;
2124
2125 if (remove_xps_queue(dev_maps, cpu, index))
2126 active = true;
2127 }
2128
2129 /* free map if not active */
2130 if (!active) {
2131 RCU_INIT_POINTER(dev->xps_maps, NULL);
2132 kfree_rcu(dev_maps, rcu);
2133 }
2134
2135out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00002136 mutex_unlock(&xps_map_mutex);
2137
2138 return 0;
2139error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002140 /* remove any maps that we added */
2141 for_each_possible_cpu(cpu) {
2142 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2143 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2144 NULL;
2145 if (new_map && new_map != map)
2146 kfree(new_map);
2147 }
2148
Alexander Duyck537c00d2013-01-10 08:57:02 +00002149 mutex_unlock(&xps_map_mutex);
2150
Alexander Duyck537c00d2013-01-10 08:57:02 +00002151 kfree(new_dev_maps);
2152 return -ENOMEM;
2153}
2154EXPORT_SYMBOL(netif_set_xps_queue);
2155
2156#endif
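/* Illustrative sketch (not part of dev.c): a driver spreading its tx
 * queues over the online CPUs, round-robin. Real drivers usually derive
 * the mask from their interrupt affinity instead. Without CONFIG_XPS the
 * netif_set_xps_queue() stub in netdevice.h makes this a no-op.
 */
static void __maybe_unused example_default_xps(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->real_num_tx_queues; i++)
		netif_set_xps_queue(dev, cpumask_of(i % num_online_cpus()), i);
}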
John Fastabendf0796d52010-07-01 13:21:57 +00002157/*
2158 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2159 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2160 */
Tom Herberte6484932010-10-18 18:04:39 +00002161int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002162{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002163 int rc;
2164
Tom Herberte6484932010-10-18 18:04:39 +00002165 if (txq < 1 || txq > dev->num_tx_queues)
2166 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002167
Ben Hutchings5c565802011-02-15 19:39:21 +00002168 if (dev->reg_state == NETREG_REGISTERED ||
2169 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002170 ASSERT_RTNL();
2171
Tom Herbert1d24eb42010-11-21 13:17:27 +00002172 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2173 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002174 if (rc)
2175 return rc;
2176
John Fastabend4f57c082011-01-17 08:06:04 +00002177 if (dev->num_tc)
2178 netif_setup_tc(dev, txq);
2179
Alexander Duyck024e9672013-01-10 08:57:46 +00002180 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002181 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002182#ifdef CONFIG_XPS
2183 netif_reset_xps_queues_gt(dev, txq);
2184#endif
2185 }
John Fastabendf0796d52010-07-01 13:21:57 +00002186 }
Tom Herberte6484932010-10-18 18:04:39 +00002187
2188 dev->real_num_tx_queues = txq;
2189 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002190}
2191EXPORT_SYMBOL(netif_set_real_num_tx_queues);
Denis Vlasenko56079432006-03-29 15:57:29 -08002192
Michael Daltona953be52014-01-16 22:23:28 -08002193#ifdef CONFIG_SYSFS
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002194/**
2195 * netif_set_real_num_rx_queues - set actual number of RX queues used
2196 * @dev: Network device
2197 * @rxq: Actual number of RX queues
2198 *
2199 * This must be called either with the rtnl_lock held or before
2200 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002201 * negative error code. If called before registration, it always
2202 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002203 */
2204int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2205{
2206 int rc;
2207
Tom Herbertbd25fa72010-10-18 18:00:16 +00002208 if (rxq < 1 || rxq > dev->num_rx_queues)
2209 return -EINVAL;
2210
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002211 if (dev->reg_state == NETREG_REGISTERED) {
2212 ASSERT_RTNL();
2213
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002214 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2215 rxq);
2216 if (rc)
2217 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002218 }
2219
2220 dev->real_num_rx_queues = rxq;
2221 return 0;
2222}
2223EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2224#endif
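/* Illustrative sketch (not part of dev.c): a driver resizing its active
 * channel count at runtime, e.g. from an ethtool set_channels hook, which
 * runs under RTNL. Function and parameter names are made up.
 */
static int __maybe_unused example_set_channels(struct net_device *dev,
					       unsigned int count)
{
	int err;

	err = netif_set_real_num_tx_queues(dev, count);
	if (err)
		return err;
	return netif_set_real_num_rx_queues(dev, count);
}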
2225
Ben Hutchings2c530402012-07-10 10:55:09 +00002226/**
2227 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002228 *
2229 * This routine should set an upper limit on the number of RSS queues
2230 * used by default by multiqueue devices.
2231 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002232int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002233{
2234 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2235}
2236EXPORT_SYMBOL(netif_get_num_default_rss_queues);
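/* Illustrative sketch (not part of dev.c): a multiqueue driver capping
 * its default queue count by this helper and by a hypothetical hardware
 * limit, EXAMPLE_HW_MAX_QUEUES.
 */
#define EXAMPLE_HW_MAX_QUEUES	16

static unsigned int __maybe_unused example_default_queue_count(void)
{
	return min_t(unsigned int, EXAMPLE_HW_MAX_QUEUES,
		     netif_get_num_default_rss_queues());
}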
2237
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002238static inline void __netif_reschedule(struct Qdisc *q)
2239{
2240 struct softnet_data *sd;
2241 unsigned long flags;
2242
2243 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05002244 sd = this_cpu_ptr(&softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002245 q->next_sched = NULL;
2246 *sd->output_queue_tailp = q;
2247 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002248 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2249 local_irq_restore(flags);
2250}
2251
David S. Miller37437bb2008-07-16 02:15:04 -07002252void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002253{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002254 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2255 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002256}
2257EXPORT_SYMBOL(__netif_schedule);
2258
Eric Dumazete6247022013-12-05 04:45:08 -08002259struct dev_kfree_skb_cb {
2260 enum skb_free_reason reason;
2261};
2262
2263static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002264{
Eric Dumazete6247022013-12-05 04:45:08 -08002265 return (struct dev_kfree_skb_cb *)skb->cb;
Denis Vlasenko56079432006-03-29 15:57:29 -08002266}
Denis Vlasenko56079432006-03-29 15:57:29 -08002267
John Fastabend46e5da42014-09-12 20:04:52 -07002268void netif_schedule_queue(struct netdev_queue *txq)
2269{
2270 rcu_read_lock();
2271 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2272 struct Qdisc *q = rcu_dereference(txq->qdisc);
2273
2274 __netif_schedule(q);
2275 }
2276 rcu_read_unlock();
2277}
2278EXPORT_SYMBOL(netif_schedule_queue);
2279
2280/**
2281 * netif_wake_subqueue - allow sending packets on subqueue
2282 * @dev: network device
2283 * @queue_index: sub queue index
2284 *
2285 * Resume individual transmit queue of a device with multiple transmit queues.
2286 */
2287void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2288{
2289 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2290
2291 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2292 struct Qdisc *q;
2293
2294 rcu_read_lock();
2295 q = rcu_dereference(txq->qdisc);
2296 __netif_schedule(q);
2297 rcu_read_unlock();
2298 }
2299}
2300EXPORT_SYMBOL(netif_wake_subqueue);
2301
2302void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2303{
2304 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2305 struct Qdisc *q;
2306
2307 rcu_read_lock();
2308 q = rcu_dereference(dev_queue->qdisc);
2309 __netif_schedule(q);
2310 rcu_read_unlock();
2311 }
2312}
2313EXPORT_SYMBOL(netif_tx_wake_queue);
2314
Eric Dumazete6247022013-12-05 04:45:08 -08002315void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2316{
2317 unsigned long flags;
2318
2319 if (likely(atomic_read(&skb->users) == 1)) {
2320 smp_rmb();
2321 atomic_set(&skb->users, 0);
2322 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2323 return;
2324 }
2325 get_kfree_skb_cb(skb)->reason = reason;
2326 local_irq_save(flags);
2327 skb->next = __this_cpu_read(softnet_data.completion_queue);
2328 __this_cpu_write(softnet_data.completion_queue, skb);
2329 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2330 local_irq_restore(flags);
2331}
2332EXPORT_SYMBOL(__dev_kfree_skb_irq);
2333
2334void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
Denis Vlasenko56079432006-03-29 15:57:29 -08002335{
2336 if (in_irq() || irqs_disabled())
Eric Dumazete6247022013-12-05 04:45:08 -08002337 __dev_kfree_skb_irq(skb, reason);
Denis Vlasenko56079432006-03-29 15:57:29 -08002338 else
2339 dev_kfree_skb(skb);
2340}
Eric Dumazete6247022013-12-05 04:45:08 -08002341EXPORT_SYMBOL(__dev_kfree_skb_any);
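/* Illustrative sketch (not part of dev.c): a tx-completion handler that
 * may run in any context uses the _any wrappers around the helper above;
 * the free reason separates delivered buffers from dropped ones for drop
 * monitoring.
 */
static void __maybe_unused example_tx_complete(struct sk_buff *skb, bool ok)
{
	if (ok)
		dev_consume_skb_any(skb);	/* delivered: not a drop */
	else
		dev_kfree_skb_any(skb);		/* accounted as a drop */
}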
Denis Vlasenko56079432006-03-29 15:57:29 -08002342
2343
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002344/**
2345 * netif_device_detach - mark device as removed
2346 * @dev: network device
2347 *
2348 * Mark the device as removed from the system and therefore no longer available.
2349 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002350void netif_device_detach(struct net_device *dev)
2351{
2352 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2353 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002354 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002355 }
2356}
2357EXPORT_SYMBOL(netif_device_detach);
2358
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002359/**
2360 * netif_device_attach - mark device as attached
2361 * @dev: network device
2362 *
2363 * Mark device as attached from system and restart if needed.
2364 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002365void netif_device_attach(struct net_device *dev)
2366{
2367 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2368 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002369 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002370 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002371 }
2372}
2373EXPORT_SYMBOL(netif_device_attach);
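/* Illustrative sketch (not part of dev.c): a typical suspend/resume pair
 * built on the two helpers above. The drvdata arrangement is
 * hypothetical.
 */
static int __maybe_unused example_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_detach(dev);	/* stops all tx queues if running */
	return 0;
}

static int __maybe_unused example_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_attach(dev);	/* wakes queues and the watchdog */
	return 0;
}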
2374
Jiri Pirko5605c762015-05-12 14:56:12 +02002375/*
2376 * Returns a Tx hash based on the given packet descriptor and the number
2377 * of Tx queues to be used as a distribution range.
2378 */
2379u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2380 unsigned int num_tx_queues)
2381{
2382 u32 hash;
2383 u16 qoffset = 0;
2384 u16 qcount = num_tx_queues;
2385
2386 if (skb_rx_queue_recorded(skb)) {
2387 hash = skb_get_rx_queue(skb);
2388 while (unlikely(hash >= num_tx_queues))
2389 hash -= num_tx_queues;
2390 return hash;
2391 }
2392
2393 if (dev->num_tc) {
2394 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2395 qoffset = dev->tc_to_txq[tc].offset;
2396 qcount = dev->tc_to_txq[tc].count;
2397 }
2398
2399 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2400}
2401EXPORT_SYMBOL(__skb_tx_hash);
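/* Note (illustration, not part of dev.c): with no recorded rx queue and
 * no traffic classes configured, the result above reduces to
 * reciprocal_scale(skb_get_hash(skb), qcount), i.e.
 * (u32)(((u64)hash * qcount) >> 32), which maps the 32-bit flow hash
 * uniformly onto [0, qcount) without a division.
 */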
2402
Ben Hutchings36c92472012-01-17 07:57:56 +00002403static void skb_warn_bad_offload(const struct sk_buff *skb)
2404{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002405 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002406 struct net_device *dev = skb->dev;
Bjørn Mork88ad4172015-11-16 19:16:40 +01002407 const char *name = "";
Ben Hutchings36c92472012-01-17 07:57:56 +00002408
Ben Greearc846ad92013-04-19 10:45:52 +00002409 if (!net_ratelimit())
2410 return;
2411
Bjørn Mork88ad4172015-11-16 19:16:40 +01002412 if (dev) {
2413 if (dev->dev.parent)
2414 name = dev_driver_string(dev->dev.parent);
2415 else
2416 name = netdev_name(dev);
2417 }
Ben Hutchings36c92472012-01-17 07:57:56 +00002418 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2419 "gso_type=%d ip_summed=%d\n",
Bjørn Mork88ad4172015-11-16 19:16:40 +01002420 name, dev ? &dev->features : &null_features,
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002421 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002422 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2423 skb_shinfo(skb)->gso_type, skb->ip_summed);
2424}
2425
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426/*
2427 * Invalidate the hardware checksum when the packet is to be mangled, and
2428 * complete the checksum manually on the outgoing path.
2429 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002430int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431{
Al Virod3bc23e2006-11-14 21:24:49 -08002432 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002433 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434
Patrick McHardy84fa7932006-08-29 16:44:56 -07002435 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002436 goto out_set_summed;
2437
2438 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002439 skb_warn_bad_offload(skb);
2440 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 }
2442
Eric Dumazetcef401d2013-01-25 20:34:37 +00002443 /* Before computing a checksum, we should make sure no frag could
2444 * be modified by an external entity : checksum could be wrong.
2445 */
2446 if (skb_has_shared_frag(skb)) {
2447 ret = __skb_linearize(skb);
2448 if (ret)
2449 goto out;
2450 }
2451
Michał Mirosław55508d62010-12-14 15:24:08 +00002452 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002453 BUG_ON(offset >= skb_headlen(skb));
2454 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2455
2456 offset += skb->csum_offset;
2457 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2458
2459 if (skb_cloned(skb) &&
2460 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2462 if (ret)
2463 goto out;
2464 }
2465
Herbert Xua0308472007-10-15 01:47:15 -07002466 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002467out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002469out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 return ret;
2471}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002472EXPORT_SYMBOL(skb_checksum_help);
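/* Illustrative sketch (not part of dev.c): a driver whose hardware cannot
 * checksum a given packet resolves CHECKSUM_PARTIAL in software before
 * handing the skb to the device. example_hw_can_csum() is a hypothetical
 * capability test.
 */
static bool example_hw_can_csum(const struct sk_buff *skb)
{
	/* hypothetical: the device only offloads IPv4 packets */
	return skb->protocol == htons(ETH_P_IP);
}

static int __maybe_unused example_prep_csum(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && !example_hw_can_csum(skb))
		return skb_checksum_help(skb);	/* 0 or a negative errno */
	return 0;
}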
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473
Vlad Yasevich53d64712014-03-27 17:26:18 -04002474__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002475{
2476 __be16 type = skb->protocol;
2477
Pravin B Shelar19acc322013-05-07 20:41:07 +00002478 /* Tunnel gso handlers can set protocol to ethernet. */
2479 if (type == htons(ETH_P_TEB)) {
2480 struct ethhdr *eth;
2481
2482 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2483 return 0;
2484
2485 eth = (struct ethhdr *)skb_mac_header(skb);
2486 type = eth->h_proto;
2487 }
2488
Toshiaki Makitad4bcef32015-01-29 20:37:07 +09002489 return __vlan_get_protocol(skb, type, depth);
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002490}
2491
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002492/**
2493 * skb_mac_gso_segment - mac layer segmentation handler.
2494 * @skb: buffer to segment
2495 * @features: features for the output path (see dev->features)
2496 */
2497struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2498 netdev_features_t features)
2499{
2500 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2501 struct packet_offload *ptype;
Vlad Yasevich53d64712014-03-27 17:26:18 -04002502 int vlan_depth = skb->mac_len;
2503 __be16 type = skb_network_protocol(skb, &vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002504
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002505 if (unlikely(!type))
2506 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002507
Vlad Yasevich53d64712014-03-27 17:26:18 -04002508 __skb_pull(skb, vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002509
2510 rcu_read_lock();
2511 list_for_each_entry_rcu(ptype, &offload_base, list) {
2512 if (ptype->type == type && ptype->callbacks.gso_segment) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002513 segs = ptype->callbacks.gso_segment(skb, features);
2514 break;
2515 }
2516 }
2517 rcu_read_unlock();
2518
2519 __skb_push(skb, skb->data - skb_mac_header(skb));
2520
2521 return segs;
2522}
2523EXPORT_SYMBOL(skb_mac_gso_segment);
2524
2525
Cong Wang12b00042013-02-05 16:36:38 +00002526/* openvswitch calls this on the rx path, so we need a different check.
2527 */
2528static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2529{
2530 if (tx_path)
2531 return skb->ip_summed != CHECKSUM_PARTIAL;
2532 else
2533 return skb->ip_summed == CHECKSUM_NONE;
2534}
2535
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002536/**
Cong Wang12b00042013-02-05 16:36:38 +00002537 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002538 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002539 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002540 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002541 *
2542 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002543 *
2544 * It may return NULL if the skb requires no segmentation. This is
2545 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002546 */
Cong Wang12b00042013-02-05 16:36:38 +00002547struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2548 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002549{
Cong Wang12b00042013-02-05 16:36:38 +00002550 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002551 int err;
2552
Ben Hutchings36c92472012-01-17 07:57:56 +00002553 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002554
françois romieua40e0a62014-07-15 23:55:35 +02002555 err = skb_cow_head(skb, 0);
2556 if (err < 0)
Herbert Xua430a432006-07-08 13:34:56 -07002557 return ERR_PTR(err);
2558 }
2559
Pravin B Shelar68c33162013-02-14 14:02:41 +00002560 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Eric Dumazet3347c962013-10-19 11:42:56 -07002561 SKB_GSO_CB(skb)->encap_level = 0;
2562
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002563 skb_reset_mac_header(skb);
2564 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002565
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002566 return skb_mac_gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002567}
Cong Wang12b00042013-02-05 16:36:38 +00002568EXPORT_SYMBOL(__skb_gso_segment);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002569
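/* Example (hypothetical sketch): consuming the list that skb_gso_segment()
 * returns. Segments are chained through skb->next, and the original skb
 * must be released once segmentation succeeded, as validate_xmit_skb()
 * below does with consume_skb().
 */
static int xmit_gso_sketch(struct sk_buff *skb, netdev_features_t features)
{
        struct sk_buff *segs = skb_gso_segment(skb, features);

        if (IS_ERR(segs))
                return PTR_ERR(segs);
        if (segs) {
                consume_skb(skb);       /* covered by the segments now */
                skb = segs;
        }
        while (skb) {
                struct sk_buff *next = skb->next;

                skb->next = NULL;
                /* hand the segment to the device here */
                skb = next;
        }
        return 0;
}
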
Herbert Xufb286bb2005-11-10 13:01:24 -08002570/* Take action when hardware reception checksum errors are detected. */
2571#ifdef CONFIG_BUG
2572void netdev_rx_csum_fault(struct net_device *dev)
2573{
2574 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002575 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002576 dump_stack();
2577 }
2578}
2579EXPORT_SYMBOL(netdev_rx_csum_fault);
2580#endif
2581
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582/* Actually, we should eliminate this check as soon as we know that:
2583 * 1. An IOMMU is present and can map all of the memory.
2584 * 2. No high memory really exists on this machine.
2585 */
2586
Florian Westphalc1e756b2014-05-05 15:00:44 +02002587static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002589#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002591 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002592 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2593 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2594 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002595 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002596 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002597 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002599 if (PCI_DMA_BUS_IS_PHYS) {
2600 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601
Eric Dumazet9092c652010-04-02 13:34:49 -07002602 if (!pdev)
2603 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002604 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002605 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2606 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002607 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2608 return 1;
2609 }
2610 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002611#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612 return 0;
2613}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
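/* Example (hypothetical driver sketch): a device whose DMA engine can
 * reach any address advertises NETIF_F_HIGHDMA at probe time, so the
 * check above never forces its highmem frags through a linearize copy.
 */
static void sketch_init_features(struct net_device *dev)
{
        dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA;
}
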
Simon Horman3b392dd2014-06-04 08:53:17 +09002615/* If this is an MPLS offload request, verify we are testing hardware MPLS
2616 * features instead of the standard features for the netdev.
2617 */
Pravin B Shelard0edc7b2014-12-23 16:20:11 -08002618#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
Simon Horman3b392dd2014-06-04 08:53:17 +09002619static netdev_features_t net_mpls_features(struct sk_buff *skb,
2620 netdev_features_t features,
2621 __be16 type)
2622{
Simon Horman25cd9ba2014-10-06 05:05:13 -07002623 if (eth_p_mpls(type))
Simon Horman3b392dd2014-06-04 08:53:17 +09002624 features &= skb->dev->mpls_features;
2625
2626 return features;
2627}
2628#else
2629static netdev_features_t net_mpls_features(struct sk_buff *skb,
2630 netdev_features_t features,
2631 __be16 type)
2632{
2633 return features;
2634}
2635#endif
2636
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002637static netdev_features_t harmonize_features(struct sk_buff *skb,
Florian Westphalc1e756b2014-05-05 15:00:44 +02002638 netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002639{
Vlad Yasevich53d64712014-03-27 17:26:18 -04002640 int tmp;
Simon Horman3b392dd2014-06-04 08:53:17 +09002641 __be16 type;
2642
2643 type = skb_network_protocol(skb, &tmp);
2644 features = net_mpls_features(skb, features, type);
Vlad Yasevich53d64712014-03-27 17:26:18 -04002645
Ed Cashinc0d680e2012-09-19 15:49:00 +00002646 if (skb->ip_summed != CHECKSUM_NONE &&
Simon Horman3b392dd2014-06-04 08:53:17 +09002647 !can_checksum_protocol(features, type)) {
Tom Herberta1882222015-12-14 11:19:43 -08002648 features &= ~NETIF_F_CSUM_MASK;
Florian Westphalc1e756b2014-05-05 15:00:44 +02002649 } else if (illegal_highdma(skb->dev, skb)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002650 features &= ~NETIF_F_SG;
2651 }
2652
2653 return features;
2654}
2655
Toshiaki Makitae38f3022015-03-27 14:31:13 +09002656netdev_features_t passthru_features_check(struct sk_buff *skb,
2657 struct net_device *dev,
2658 netdev_features_t features)
2659{
2660 return features;
2661}
2662EXPORT_SYMBOL(passthru_features_check);
2663
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002664static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2665 struct net_device *dev,
2666 netdev_features_t features)
2667{
2668 return vlan_features_check(skb, features);
2669}
2670
Florian Westphalc1e756b2014-05-05 15:00:44 +02002671netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002672{
Jesse Gross5f352272014-12-23 22:37:26 -08002673 struct net_device *dev = skb->dev;
Eric Dumazetfcbeb972014-10-05 10:11:27 -07002674 netdev_features_t features = dev->features;
2675 u16 gso_segs = skb_shinfo(skb)->gso_segs;
Jesse Gross58e998c2010-10-29 12:14:55 +00002676
Eric Dumazetfcbeb972014-10-05 10:11:27 -07002677 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
Ben Hutchings30b678d2012-07-30 15:57:00 +00002678 features &= ~NETIF_F_GSO_MASK;
2679
Jesse Gross5f352272014-12-23 22:37:26 -08002680	/* If this is an encapsulation offload request, verify we are
2681	 * testing hardware encapsulation features instead of the
2682	 * standard features for the netdev.
2683 */
2684 if (skb->encapsulation)
2685 features &= dev->hw_enc_features;
2686
Toshiaki Makitaf5a7fb82015-03-27 14:31:11 +09002687 if (skb_vlan_tagged(skb))
2688 features = netdev_intersect_features(features,
2689 dev->vlan_features |
2690 NETIF_F_HW_VLAN_CTAG_TX |
2691 NETIF_F_HW_VLAN_STAG_TX);
Jesse Gross58e998c2010-10-29 12:14:55 +00002692
Jesse Gross5f352272014-12-23 22:37:26 -08002693 if (dev->netdev_ops->ndo_features_check)
2694 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2695 features);
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002696 else
2697 features &= dflt_features_check(skb, dev, features);
Jesse Gross5f352272014-12-23 22:37:26 -08002698
Florian Westphalc1e756b2014-05-05 15:00:44 +02002699 return harmonize_features(skb, features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002700}
Florian Westphalc1e756b2014-05-05 15:00:44 +02002701EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002702
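/* Example (hypothetical sketch): the usual shape of a caller of
 * netif_skb_features(), mirroring validate_xmit_skb() below: compute
 * the per-skb feature set, then segment or linearize accordingly.
 */
static struct sk_buff *prep_xmit_sketch(struct sk_buff *skb)
{
        netdev_features_t features = netif_skb_features(skb);

        if (netif_needs_gso(skb, features))
                return skb_gso_segment(skb, features);
        if (skb_needs_linearize(skb, features) && __skb_linearize(skb))
                return ERR_PTR(-ENOMEM);
        return skb;
}
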
David S. Miller2ea25512014-08-29 21:10:01 -07002703static int xmit_one(struct sk_buff *skb, struct net_device *dev,
David S. Miller95f6b3d2014-08-29 21:57:30 -07002704 struct netdev_queue *txq, bool more)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002705{
David S. Miller2ea25512014-08-29 21:10:01 -07002706 unsigned int len;
2707 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08002708
Salam Noureddine7866a622015-01-27 11:35:48 -08002709 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
David S. Miller2ea25512014-08-29 21:10:01 -07002710 dev_queue_xmit_nit(skb, dev);
Jesse Grossfc741212011-01-09 06:23:32 +00002711
David S. Miller2ea25512014-08-29 21:10:01 -07002712 len = skb->len;
2713 trace_net_dev_start_xmit(skb, dev);
David S. Miller95f6b3d2014-08-29 21:57:30 -07002714 rc = netdev_start_xmit(skb, dev, txq, more);
David S. Miller2ea25512014-08-29 21:10:01 -07002715 trace_net_dev_xmit(skb, rc, dev, len);
Eric Dumazetadf30902009-06-02 05:19:30 +00002716
Patrick McHardy572a9d72009-11-10 06:14:14 +00002717 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002718}
David S. Miller2ea25512014-08-29 21:10:01 -07002719
David S. Miller8dcda222014-09-01 15:06:40 -07002720struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2721 struct netdev_queue *txq, int *ret)
David S. Miller7f2e8702014-08-29 21:19:14 -07002722{
2723 struct sk_buff *skb = first;
2724 int rc = NETDEV_TX_OK;
2725
2726 while (skb) {
2727 struct sk_buff *next = skb->next;
2728
2729 skb->next = NULL;
David S. Miller95f6b3d2014-08-29 21:57:30 -07002730 rc = xmit_one(skb, dev, txq, next != NULL);
David S. Miller7f2e8702014-08-29 21:19:14 -07002731 if (unlikely(!dev_xmit_complete(rc))) {
2732 skb->next = next;
2733 goto out;
2734 }
2735
2736 skb = next;
2737 if (netif_xmit_stopped(txq) && skb) {
2738 rc = NETDEV_TX_BUSY;
2739 break;
2740 }
2741 }
2742
2743out:
2744 *ret = rc;
2745 return skb;
2746}
2747
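/* Example (hypothetical sketch): how a caller deals with the return
 * value of dev_hard_start_xmit(). A non-NULL result is the unsent
 * remainder of the chain; sch_direct_xmit() requeues it when *ret is
 * not dev_xmit_complete().
 */
static void xmit_chain_sketch(struct sk_buff *skb, struct net_device *dev,
                              struct netdev_queue *txq)
{
        int rc;

        skb = dev_hard_start_xmit(skb, dev, txq, &rc);
        if (skb && !dev_xmit_complete(rc))
                kfree_skb_list(skb);    /* a real caller would requeue */
}
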
Eric Dumazet1ff0dc92014-10-06 11:26:27 -07002748static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2749 netdev_features_t features)
David S. Millereae3f882014-08-30 15:17:13 -07002750{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002751 if (skb_vlan_tag_present(skb) &&
Jiri Pirko59682502014-11-19 14:04:59 +01002752 !vlan_hw_offload_capable(features, skb->vlan_proto))
2753 skb = __vlan_hwaccel_push_inside(skb);
David S. Millereae3f882014-08-30 15:17:13 -07002754 return skb;
2755}
2756
Eric Dumazet55a93b32014-10-03 15:31:07 -07002757static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
David S. Millereae3f882014-08-30 15:17:13 -07002758{
2759 netdev_features_t features;
2760
2761 if (skb->next)
2762 return skb;
2763
David S. Millereae3f882014-08-30 15:17:13 -07002764 features = netif_skb_features(skb);
2765 skb = validate_xmit_vlan(skb, features);
2766 if (unlikely(!skb))
2767 goto out_null;
2768
Johannes Berg8b86a612015-04-17 15:45:04 +02002769 if (netif_needs_gso(skb, features)) {
David S. Millerce937182014-08-30 19:22:20 -07002770 struct sk_buff *segs;
2771
2772 segs = skb_gso_segment(skb, features);
Jason Wangcecda692014-09-19 16:04:38 +08002773 if (IS_ERR(segs)) {
Jason Wangaf6dabc2014-12-19 11:09:13 +08002774 goto out_kfree_skb;
Jason Wangcecda692014-09-19 16:04:38 +08002775 } else if (segs) {
2776 consume_skb(skb);
2777 skb = segs;
2778 }
David S. Millereae3f882014-08-30 15:17:13 -07002779 } else {
2780 if (skb_needs_linearize(skb, features) &&
2781 __skb_linearize(skb))
2782 goto out_kfree_skb;
2783
2784 /* If packet is not checksummed and device does not
2785 * support checksumming for this protocol, complete
2786 * checksumming here.
2787 */
2788 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2789 if (skb->encapsulation)
2790 skb_set_inner_transport_header(skb,
2791 skb_checksum_start_offset(skb));
2792 else
2793 skb_set_transport_header(skb,
2794 skb_checksum_start_offset(skb));
Tom Herberta1882222015-12-14 11:19:43 -08002795 if (!(features & NETIF_F_CSUM_MASK) &&
David S. Millereae3f882014-08-30 15:17:13 -07002796 skb_checksum_help(skb))
2797 goto out_kfree_skb;
2798 }
2799 }
2800
2801 return skb;
2802
2803out_kfree_skb:
2804 kfree_skb(skb);
2805out_null:
2806 return NULL;
2807}
2808
Eric Dumazet55a93b32014-10-03 15:31:07 -07002809struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2810{
2811 struct sk_buff *next, *head = NULL, *tail;
2812
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07002813 for (; skb != NULL; skb = next) {
Eric Dumazet55a93b32014-10-03 15:31:07 -07002814 next = skb->next;
2815 skb->next = NULL;
Eric Dumazet55a93b32014-10-03 15:31:07 -07002816
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07002817		/* in case the skb won't be segmented, point prev to itself */
2818 skb->prev = skb;
2819
2820 skb = validate_xmit_skb(skb, dev);
2821 if (!skb)
2822 continue;
2823
2824 if (!head)
2825 head = skb;
2826 else
2827 tail->next = skb;
2828 /* If skb was segmented, skb->prev points to
2829 * the last segment. If not, it still contains skb.
2830 */
2831 tail = skb->prev;
Eric Dumazet55a93b32014-10-03 15:31:07 -07002832 }
2833 return head;
2834}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002835
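/* Worked example: for a chain A->B->C where only B gets segmented into
 * B1->B2, the loop above splices the result into A->B1->B2->C. The
 * tail is found in O(1) because skb_segment() points head->prev at the
 * last segment, and unsegmented skbs point prev at themselves above.
 */
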
Eric Dumazet1def9232013-01-10 12:36:42 +00002836static void qdisc_pkt_len_init(struct sk_buff *skb)
2837{
2838 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2839
2840 qdisc_skb_cb(skb)->pkt_len = skb->len;
2841
2842	/* To get a more precise estimate of the bytes sent on the wire,
2843	 * we add to pkt_len the header size of all segments
2844 */
2845 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08002846 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00002847 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00002848
Eric Dumazet757b8b12013-01-15 21:14:21 -08002849 /* mac layer + network layer */
2850 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2851
2852 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00002853 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2854 hdr_len += tcp_hdrlen(skb);
2855 else
2856 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00002857
2858 if (shinfo->gso_type & SKB_GSO_DODGY)
2859 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2860 shinfo->gso_size);
2861
2862 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00002863 }
2864}
2865
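/* Worked example (hypothetical numbers): a TSO skb carrying 14 bytes of
 * MAC, 20 bytes of IP and 32 bytes of TCP header (hdr_len = 66) with
 * gso_size = 1448 and gso_segs = 10 has skb->len = 66 + 10 * 1448 =
 * 14546, but really puts 10 * (66 + 1448) = 15140 bytes on the wire;
 * pkt_len above is therefore bumped by (10 - 1) * 66 = 594 bytes.
 */
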
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002866static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2867 struct net_device *dev,
2868 struct netdev_queue *txq)
2869{
2870 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002871 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002872 int rc;
2873
Eric Dumazet1def9232013-01-10 12:36:42 +00002874 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002875 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002876 /*
2877 * Heuristic to force contended enqueues to serialize on a
2878	 * separate lock before trying to get the qdisc main lock.
Ying Xue9bf2b8c2014-06-26 15:56:31 +08002879	 * This permits the __QDISC___STATE_RUNNING owner to get the lock
2880	 * more often and dequeue packets faster.
Eric Dumazet79640a42010-06-02 05:09:29 -07002881 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002882 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002883 if (unlikely(contended))
2884 spin_lock(&q->busylock);
2885
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002886 spin_lock(root_lock);
2887 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2888 kfree_skb(skb);
2889 rc = NET_XMIT_DROP;
2890 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002891 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002892 /*
2893 * This is a work-conserving queue; there are no old skbs
2894 * waiting to be sent out; and the qdisc is not running -
2895 * xmit the skb directly.
2896 */
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002897
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002898 qdisc_bstats_update(q, skb);
2899
Eric Dumazet55a93b32014-10-03 15:31:07 -07002900 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
Eric Dumazet79640a42010-06-02 05:09:29 -07002901 if (unlikely(contended)) {
2902 spin_unlock(&q->busylock);
2903 contended = false;
2904 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002905 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002906 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002907 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002908
2909 rc = NET_XMIT_SUCCESS;
2910 } else {
Eric Dumazeta2da5702011-01-20 03:48:19 +00002911 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002912 if (qdisc_run_begin(q)) {
2913 if (unlikely(contended)) {
2914 spin_unlock(&q->busylock);
2915 contended = false;
2916 }
2917 __qdisc_run(q);
2918 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002919 }
2920 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002921 if (unlikely(contended))
2922 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002923 return rc;
2924}
2925
Daniel Borkmann86f85152013-12-29 17:27:11 +01002926#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Neil Horman5bc14212011-11-22 05:10:51 +00002927static void skb_update_prio(struct sk_buff *skb)
2928{
Igor Maravic6977a792011-11-25 07:44:54 +00002929 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002930
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002931 if (!skb->priority && skb->sk && map) {
Tejun Heo2a56a1f2015-12-07 17:38:52 -05002932 unsigned int prioidx =
2933 sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002934
2935 if (prioidx < map->priomap_len)
2936 skb->priority = map->priomap[prioidx];
2937 }
Neil Horman5bc14212011-11-22 05:10:51 +00002938}
2939#else
2940#define skb_update_prio(skb)
2941#endif
2942
hannes@stressinduktion.orgf60e5992015-04-01 17:07:44 +02002943DEFINE_PER_CPU(int, xmit_recursion);
2944EXPORT_SYMBOL(xmit_recursion);
2945
David S. Miller11a766c2010-10-25 12:51:55 -07002946#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002947
Dave Jonesd29f7492008-07-22 14:09:06 -07002948/**
Michel Machado95603e22012-06-12 10:16:35 +00002949 * dev_loopback_xmit - loop back @skb
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05002950 * @net: network namespace this loopback is happening in
2951 * @sk: the socket; present so this function can be used as a netfilter okfn
Michel Machado95603e22012-06-12 10:16:35 +00002952 * @skb: buffer to transmit
2953 */
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05002954int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
Michel Machado95603e22012-06-12 10:16:35 +00002955{
2956 skb_reset_mac_header(skb);
2957 __skb_pull(skb, skb_network_offset(skb));
2958 skb->pkt_type = PACKET_LOOPBACK;
2959 skb->ip_summed = CHECKSUM_UNNECESSARY;
2960 WARN_ON(!skb_dst(skb));
2961 skb_dst_force(skb);
2962 netif_rx_ni(skb);
2963 return 0;
2964}
2965EXPORT_SYMBOL(dev_loopback_xmit);
2966
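/* Example (hypothetical sketch): because dev_loopback_xmit() has the
 * okfn signature, an output path can hand a looped-back copy to it via
 * NF_HOOK(), much like the IPv4 multicast output path does.
 */
static int loop_copy_sketch(struct net *net, struct sock *sk,
                            struct sk_buff *newskb, struct net_device *dev)
{
        return NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                       net, sk, newskb, NULL, dev, dev_loopback_xmit);
}
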
Jiri Pirko638b2a62015-05-12 14:56:13 +02002967static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2968{
2969#ifdef CONFIG_XPS
2970 struct xps_dev_maps *dev_maps;
2971 struct xps_map *map;
2972 int queue_index = -1;
2973
2974 rcu_read_lock();
2975 dev_maps = rcu_dereference(dev->xps_maps);
2976 if (dev_maps) {
2977 map = rcu_dereference(
2978 dev_maps->cpu_map[skb->sender_cpu - 1]);
2979 if (map) {
2980 if (map->len == 1)
2981 queue_index = map->queues[0];
2982 else
2983 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
2984 map->len)];
2985 if (unlikely(queue_index >= dev->real_num_tx_queues))
2986 queue_index = -1;
2987 }
2988 }
2989 rcu_read_unlock();
2990
2991 return queue_index;
2992#else
2993 return -1;
2994#endif
2995}
2996
2997static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
2998{
2999 struct sock *sk = skb->sk;
3000 int queue_index = sk_tx_queue_get(sk);
3001
3002 if (queue_index < 0 || skb->ooo_okay ||
3003 queue_index >= dev->real_num_tx_queues) {
3004 int new_index = get_xps_queue(dev, skb);
3005 if (new_index < 0)
3006 new_index = skb_tx_hash(dev, skb);
3007
3008 if (queue_index != new_index && sk &&
Eric Dumazet004a5d02015-10-04 21:08:10 -07003009 sk_fullsock(sk) &&
Jiri Pirko638b2a62015-05-12 14:56:13 +02003010 rcu_access_pointer(sk->sk_dst_cache))
3011 sk_tx_queue_set(sk, new_index);
3012
3013 queue_index = new_index;
3014 }
3015
3016 return queue_index;
3017}
3018
3019struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3020 struct sk_buff *skb,
3021 void *accel_priv)
3022{
3023 int queue_index = 0;
3024
3025#ifdef CONFIG_XPS
Eric Dumazet52bd2d62015-11-18 06:30:50 -08003026 u32 sender_cpu = skb->sender_cpu - 1;
3027
3028 if (sender_cpu >= (u32)NR_CPUS)
Jiri Pirko638b2a62015-05-12 14:56:13 +02003029 skb->sender_cpu = raw_smp_processor_id() + 1;
3030#endif
3031
3032 if (dev->real_num_tx_queues != 1) {
3033 const struct net_device_ops *ops = dev->netdev_ops;
3034 if (ops->ndo_select_queue)
3035 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3036 __netdev_pick_tx);
3037 else
3038 queue_index = __netdev_pick_tx(dev, skb);
3039
3040 if (!accel_priv)
3041 queue_index = netdev_cap_txqueue(dev, queue_index);
3042 }
3043
3044 skb_set_queue_mapping(skb, queue_index);
3045 return netdev_get_tx_queue(dev, queue_index);
3046}
3047
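/* Example (hypothetical driver sketch): an ndo_select_queue()
 * implementation that pins control traffic to queue 0 and defers
 * everything else to the core fallback, which is __netdev_pick_tx()
 * as wired up above.
 */
static u16 sketch_select_queue(struct net_device *dev, struct sk_buff *skb,
                               void *accel_priv,
                               select_queue_fallback_t fallback)
{
        if (skb->priority == TC_PRIO_CONTROL)
                return 0;
        return fallback(dev, skb);
}
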
Michel Machado95603e22012-06-12 10:16:35 +00003048/**
Jason Wang9d08dd32014-01-20 11:25:13 +08003049 * __dev_queue_xmit - transmit a buffer
Dave Jonesd29f7492008-07-22 14:09:06 -07003050 * @skb: buffer to transmit
Jason Wang9d08dd32014-01-20 11:25:13 +08003051 * @accel_priv: private data used for L2 forwarding offload
Dave Jonesd29f7492008-07-22 14:09:06 -07003052 *
3053 * Queue a buffer for transmission to a network device. The caller must
3054 * have set the device and priority and built the buffer before calling
3055 * this function. The function can be called from an interrupt.
3056 *
3057 * A negative errno code is returned on a failure. A success does not
3058 * guarantee the frame will be transmitted as it may be dropped due
3059 * to congestion or traffic shaping.
3060 *
3061 * -----------------------------------------------------------------------------------
3062 * I notice this method can also return errors from the queue disciplines,
3063 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3064 * be positive.
3065 *
3066 * Regardless of the return value, the skb is consumed, so it is currently
3067 * difficult to retry a send to this method. (You can bump the ref count
3068 * before sending to hold a reference for retry if you are careful.)
3069 *
3070 * When calling this method, interrupts MUST be enabled. This is because
3071 * the BH enable code must have IRQs enabled so that it will not deadlock.
3072 * --BLG
3073 */
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05303074static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003075{
3076 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07003077 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078 struct Qdisc *q;
3079 int rc = -ENOMEM;
3080
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00003081 skb_reset_mac_header(skb);
3082
Willem de Bruijne7fd2882014-08-04 22:11:48 -04003083 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3084 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3085
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003086 /* Disable soft irqs for various locks below. Also
3087 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003089 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090
Neil Horman5bc14212011-11-22 05:10:51 +00003091 skb_update_prio(skb);
3092
Eric Dumazet02875872014-10-05 18:38:35 -07003093	/* If the device/qdisc doesn't need skb->dst, release it right now
3094	 * while it's hot in this CPU's cache.
3095 */
3096 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3097 skb_dst_drop(skb);
3098 else
3099 skb_dst_force(skb);
3100
Scott Feldman0c4f6912015-07-18 18:24:48 -07003101#ifdef CONFIG_NET_SWITCHDEV
3102	/* Don't forward if the offload device already forwarded it */
3103 if (skb->offload_fwd_mark &&
3104 skb->offload_fwd_mark == dev->offload_fwd_mark) {
3105 consume_skb(skb);
3106 rc = NET_XMIT_SUCCESS;
3107 goto out;
3108 }
3109#endif
3110
Jason Wangf663dd92014-01-10 16:18:26 +08003111 txq = netdev_pick_tx(dev, skb, accel_priv);
Paul E. McKenneya898def2010-02-22 17:04:49 -08003112 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07003113
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003115 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003116#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09003117 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003118 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003119 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07003120 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121 }
3122
3123	/* The device has no queue. Common case for software devices:
3124	   loopback, all sorts of tunnels...
3125
Herbert Xu932ff272006-06-09 12:20:56 -07003126	   Really, it is unlikely that netif_tx_lock protection is necessary
3127	   here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128	   counters.)
3129	   However, it is possible that they rely on the protection
3130	   provided by us here.
3131
3132	   Check this and shoot the lock. It is not prone to deadlocks.
3133	   Or just shoot the noqueue qdisc; it is even simpler 8)
3134 */
3135 if (dev->flags & IFF_UP) {
3136 int cpu = smp_processor_id(); /* ok because BHs are off */
3137
David S. Millerc773e842008-07-08 23:13:53 -07003138 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003139
Eric Dumazet745e20f2010-09-29 13:23:09 -07003140 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
3141 goto recursion_alert;
3142
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003143 skb = validate_xmit_skb(skb, dev);
3144 if (!skb)
3145 goto drop;
3146
David S. Millerc773e842008-07-08 23:13:53 -07003147 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148
Tom Herbert734664982011-11-28 16:32:44 +00003149 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07003150 __this_cpu_inc(xmit_recursion);
David S. Millerce937182014-08-30 19:22:20 -07003151 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
Eric Dumazet745e20f2010-09-29 13:23:09 -07003152 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00003153 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07003154 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155 goto out;
3156 }
3157 }
David S. Millerc773e842008-07-08 23:13:53 -07003158 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00003159 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3160 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161 } else {
3162		/* Recursion detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07003163		 * unfortunately.
3164 */
3165recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00003166 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3167 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168 }
3169 }
3170
3171 rc = -ENETDOWN;
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003172drop:
Herbert Xud4828d82006-06-22 02:28:18 -07003173 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174
Eric Dumazet015f0682014-03-27 08:45:56 -07003175 atomic_long_inc(&dev->tx_dropped);
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003176 kfree_skb_list(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003177 return rc;
3178out:
Herbert Xud4828d82006-06-22 02:28:18 -07003179 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180 return rc;
3181}
Jason Wangf663dd92014-01-10 16:18:26 +08003182
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05003183int dev_queue_xmit(struct sk_buff *skb)
Jason Wangf663dd92014-01-10 16:18:26 +08003184{
3185 return __dev_queue_xmit(skb, NULL);
3186}
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05003187EXPORT_SYMBOL(dev_queue_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003188
Jason Wangf663dd92014-01-10 16:18:26 +08003189int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3190{
3191 return __dev_queue_xmit(skb, accel_priv);
3192}
3193EXPORT_SYMBOL(dev_queue_xmit_accel);
3194
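/* Example (hypothetical sketch): the minimal contract for a caller of
 * dev_queue_xmit() -- build a complete L2 frame, set skb->dev, then
 * hand the skb off. It is consumed whatever the return value is.
 */
static int send_frame_sketch(struct net_device *dev,
                             const void *frame, unsigned int len)
{
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                return -ENOMEM;
        memcpy(skb_put(skb, len), frame, len);  /* frame includes MAC header */
        skb->dev = dev;
        skb_reset_mac_header(skb);
        skb->protocol = eth_hdr(skb)->h_proto;
        return dev_queue_xmit(skb);     /* may return positive NET_XMIT_* codes */
}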
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195
3196/*=======================================================================
3197 Receiver routines
3198 =======================================================================*/
3199
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003200int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00003201EXPORT_SYMBOL(netdev_max_backlog);
3202
Eric Dumazet3b098e22010-05-15 23:57:10 -07003203int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003204int netdev_budget __read_mostly = 300;
3205int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003207/* Called with irq disabled */
3208static inline void ____napi_schedule(struct softnet_data *sd,
3209 struct napi_struct *napi)
3210{
3211 list_add_tail(&napi->poll_list, &sd->poll_list);
3212 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3213}
3214
Eric Dumazetdf334542010-03-24 19:13:54 +00003215#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07003216
3217/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003218struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07003219EXPORT_SYMBOL(rps_sock_flow_table);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003220u32 rps_cpu_mask __read_mostly;
3221EXPORT_SYMBOL(rps_cpu_mask);
Tom Herbertfec5e652010-04-16 16:01:27 -07003222
Ingo Molnarc5905af2012-02-24 08:31:31 +01003223struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00003224
Ben Hutchingsc4454772011-01-19 11:03:53 +00003225static struct rps_dev_flow *
3226set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3227 struct rps_dev_flow *rflow, u16 next_cpu)
3228{
Eric Dumazeta31196b2015-04-25 09:35:24 -07003229 if (next_cpu < nr_cpu_ids) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00003230#ifdef CONFIG_RFS_ACCEL
3231 struct netdev_rx_queue *rxqueue;
3232 struct rps_dev_flow_table *flow_table;
3233 struct rps_dev_flow *old_rflow;
3234 u32 flow_id;
3235 u16 rxq_index;
3236 int rc;
3237
3238 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00003239 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3240 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00003241 goto out;
3242 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3243 if (rxq_index == skb_get_rx_queue(skb))
3244 goto out;
3245
3246 rxqueue = dev->_rx + rxq_index;
3247 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3248 if (!flow_table)
3249 goto out;
Tom Herbert61b905d2014-03-24 15:34:47 -07003250 flow_id = skb_get_hash(skb) & flow_table->mask;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003251 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3252 rxq_index, flow_id);
3253 if (rc < 0)
3254 goto out;
3255 old_rflow = rflow;
3256 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00003257 rflow->filter = rc;
3258 if (old_rflow->filter == rflow->filter)
3259 old_rflow->filter = RPS_NO_FILTER;
3260 out:
3261#endif
3262 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00003263 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003264 }
3265
Ben Hutchings09994d12011-10-03 04:42:46 +00003266 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003267 return rflow;
3268}
3269
Tom Herbert0a9627f2010-03-16 08:03:29 +00003270/*
3271 * get_rps_cpu is called from netif_receive_skb and returns the target
3272 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003273 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00003274 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003275static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3276 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003277{
Eric Dumazet567e4b72015-02-06 12:59:01 -08003278 const struct rps_sock_flow_table *sock_flow_table;
3279 struct netdev_rx_queue *rxqueue = dev->_rx;
Tom Herbertfec5e652010-04-16 16:01:27 -07003280 struct rps_dev_flow_table *flow_table;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003281 struct rps_map *map;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003282 int cpu = -1;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003283 u32 tcpu;
Tom Herbert61b905d2014-03-24 15:34:47 -07003284 u32 hash;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003285
Tom Herbert0a9627f2010-03-16 08:03:29 +00003286 if (skb_rx_queue_recorded(skb)) {
3287 u16 index = skb_get_rx_queue(skb);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003288
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003289 if (unlikely(index >= dev->real_num_rx_queues)) {
3290 WARN_ONCE(dev->real_num_rx_queues > 1,
3291 "%s received packet on queue %u, but number "
3292 "of RX queues is %u\n",
3293 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003294 goto done;
3295 }
Eric Dumazet567e4b72015-02-06 12:59:01 -08003296 rxqueue += index;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003297 }
3298
Eric Dumazet567e4b72015-02-06 12:59:01 -08003299 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3300
3301 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3302 map = rcu_dereference(rxqueue->rps_map);
3303 if (!flow_table && !map)
3304 goto done;
3305
Changli Gao2d47b452010-08-17 19:00:56 +00003306 skb_reset_network_header(skb);
Tom Herbert61b905d2014-03-24 15:34:47 -07003307 hash = skb_get_hash(skb);
3308 if (!hash)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003309 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003310
Tom Herbertfec5e652010-04-16 16:01:27 -07003311 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3312 if (flow_table && sock_flow_table) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003313 struct rps_dev_flow *rflow;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003314 u32 next_cpu;
3315 u32 ident;
Tom Herbertfec5e652010-04-16 16:01:27 -07003316
Eric Dumazet567e4b72015-02-06 12:59:01 -08003317		/* First check the global flow table for a match */
3318 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3319 if ((ident ^ hash) & ~rps_cpu_mask)
3320 goto try_rps;
3321
3322 next_cpu = ident & rps_cpu_mask;
3323
3324 /* OK, now we know there is a match,
3325 * we can look at the local (per receive queue) flow table
3326 */
Tom Herbert61b905d2014-03-24 15:34:47 -07003327 rflow = &flow_table->flows[hash & flow_table->mask];
Tom Herbertfec5e652010-04-16 16:01:27 -07003328 tcpu = rflow->cpu;
3329
Tom Herbertfec5e652010-04-16 16:01:27 -07003330 /*
3331 * If the desired CPU (where last recvmsg was done) is
3332 * different from current CPU (one in the rx-queue flow
3333 * table entry), switch if one of the following holds:
Eric Dumazeta31196b2015-04-25 09:35:24 -07003334 * - Current CPU is unset (>= nr_cpu_ids).
Tom Herbertfec5e652010-04-16 16:01:27 -07003335 * - Current CPU is offline.
3336 * - The current CPU's queue tail has advanced beyond the
3337 * last packet that was enqueued using this table entry.
3338 * This guarantees that all previous packets for the flow
3339 * have been dequeued, thus preserving in order delivery.
3340 */
3341 if (unlikely(tcpu != next_cpu) &&
Eric Dumazeta31196b2015-04-25 09:35:24 -07003342 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
Tom Herbertfec5e652010-04-16 16:01:27 -07003343 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003344 rflow->last_qtail)) >= 0)) {
3345 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003346 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003347 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003348
Eric Dumazeta31196b2015-04-25 09:35:24 -07003349 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003350 *rflowp = rflow;
3351 cpu = tcpu;
3352 goto done;
3353 }
3354 }
3355
Eric Dumazet567e4b72015-02-06 12:59:01 -08003356try_rps:
3357
Tom Herbert0a9627f2010-03-16 08:03:29 +00003358 if (map) {
Daniel Borkmann8fc54f62014-08-23 20:58:54 +02003359 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003360 if (cpu_online(tcpu)) {
3361 cpu = tcpu;
3362 goto done;
3363 }
3364 }
3365
3366done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003367 return cpu;
3368}
3369
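/* Worked example: with 32 possible CPUs, rps_cpu_mask is 0x1f and each
 * rps_sock_flow_table entry packs "upper hash bits | CPU". A flow with
 * hash 0xabcd1234 whose last recvmsg ran on CPU 5 is stored as
 * (0xabcd1234 & ~0x1f) | 5, so the "(ident ^ hash) & ~rps_cpu_mask"
 * test above recognises the flow and extracts next_cpu = 5.
 */
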
Ben Hutchingsc4454772011-01-19 11:03:53 +00003370#ifdef CONFIG_RFS_ACCEL
3371
3372/**
3373 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3374 * @dev: Device on which the filter was set
3375 * @rxq_index: RX queue index
3376 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3377 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3378 *
3379 * Drivers that implement ndo_rx_flow_steer() should periodically call
3380 * this function for each installed filter and remove the filters for
3381 * which it returns %true.
3382 */
3383bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3384 u32 flow_id, u16 filter_id)
3385{
3386 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3387 struct rps_dev_flow_table *flow_table;
3388 struct rps_dev_flow *rflow;
3389 bool expire = true;
Eric Dumazeta31196b2015-04-25 09:35:24 -07003390 unsigned int cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003391
3392 rcu_read_lock();
3393 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3394 if (flow_table && flow_id <= flow_table->mask) {
3395 rflow = &flow_table->flows[flow_id];
3396 cpu = ACCESS_ONCE(rflow->cpu);
Eric Dumazeta31196b2015-04-25 09:35:24 -07003397 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
Ben Hutchingsc4454772011-01-19 11:03:53 +00003398 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3399 rflow->last_qtail) <
3400 (int)(10 * flow_table->mask)))
3401 expire = false;
3402 }
3403 rcu_read_unlock();
3404 return expire;
3405}
3406EXPORT_SYMBOL(rps_may_expire_flow);
3407
3408#endif /* CONFIG_RFS_ACCEL */
3409
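/* Example (hypothetical driver sketch): the periodic scan that
 * rps_may_expire_flow() is meant for. "sketch_filter_id_of" and
 * "sketch_remove_filter" stand in for made-up driver internals.
 */
static void sketch_expire_filters(struct net_device *dev, u16 rxq_index,
                                  u32 nr_flow_ids)
{
        u32 flow_id;

        for (flow_id = 0; flow_id < nr_flow_ids; flow_id++) {
                u16 filter_id = sketch_filter_id_of(dev, rxq_index, flow_id);

                if (rps_may_expire_flow(dev, rxq_index, flow_id, filter_id))
                        sketch_remove_filter(dev, rxq_index, filter_id);
        }
}
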
Tom Herbert0a9627f2010-03-16 08:03:29 +00003410/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003411static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003412{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003413 struct softnet_data *sd = data;
3414
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003415 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003416 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003417}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003418
Tom Herbertfec5e652010-04-16 16:01:27 -07003419#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003420
3421/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003422 * Check if this softnet_data structure belongs to another CPU.
3423 * If yes, queue it to our IPI list and return 1;
3424 * if no, return 0.
3425 */
3426static int rps_ipi_queued(struct softnet_data *sd)
3427{
3428#ifdef CONFIG_RPS
Christoph Lameter903ceff2014-08-17 12:30:35 -05003429 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003430
3431 if (sd != mysd) {
3432 sd->rps_ipi_next = mysd->rps_ipi_list;
3433 mysd->rps_ipi_list = sd;
3434
3435 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3436 return 1;
3437 }
3438#endif /* CONFIG_RPS */
3439 return 0;
3440}
3441
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003442#ifdef CONFIG_NET_FLOW_LIMIT
3443int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3444#endif
3445
3446static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3447{
3448#ifdef CONFIG_NET_FLOW_LIMIT
3449 struct sd_flow_limit *fl;
3450 struct softnet_data *sd;
3451 unsigned int old_flow, new_flow;
3452
3453 if (qlen < (netdev_max_backlog >> 1))
3454 return false;
3455
Christoph Lameter903ceff2014-08-17 12:30:35 -05003456 sd = this_cpu_ptr(&softnet_data);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003457
3458 rcu_read_lock();
3459 fl = rcu_dereference(sd->flow_limit);
3460 if (fl) {
Tom Herbert3958afa1b2013-12-15 22:12:06 -08003461 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003462 old_flow = fl->history[fl->history_head];
3463 fl->history[fl->history_head] = new_flow;
3464
3465 fl->history_head++;
3466 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3467
3468 if (likely(fl->buckets[old_flow]))
3469 fl->buckets[old_flow]--;
3470
3471 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3472 fl->count++;
3473 rcu_read_unlock();
3474 return true;
3475 }
3476 }
3477 rcu_read_unlock();
3478#endif
3479 return false;
3480}
3481
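/* Worked example: the history above tracks the hashes of the last
 * FLOW_LIMIT_HISTORY packets that were enqueued. Once a single flow
 * owns more than half of those slots while the backlog is at least
 * half full, its further packets are dropped, so one elephant flow
 * cannot starve the remaining queue space for everybody else.
 */
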
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003482/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003483 * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
3484 * queue (may be a remote CPU queue).
3485 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003486static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3487 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003488{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003489 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003490 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003491 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003492
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003493 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003494
3495 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003496
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003497 rps_lock(sd);
Julian Anastasove9e4dd32015-07-09 09:59:09 +03003498 if (!netif_running(skb->dev))
3499 goto drop;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003500 qlen = skb_queue_len(&sd->input_pkt_queue);
3501 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Li RongQinge008f3f2014-12-08 09:42:55 +08003502 if (qlen) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003503enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003504 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003505 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003506 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003507 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003508 return NET_RX_SUCCESS;
3509 }
3510
Eric Dumazetebda37c22010-05-06 23:51:21 +00003511		/* Schedule NAPI for the backlog device.
3512		 * We can use a non-atomic operation since we own the queue lock.
3513 */
3514 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003515 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003516 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003517 }
3518 goto enqueue;
3519 }
3520
Julian Anastasove9e4dd32015-07-09 09:59:09 +03003521drop:
Changli Gaodee42872010-05-02 05:42:16 +00003522 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003523 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003524
Tom Herbert0a9627f2010-03-16 08:03:29 +00003525 local_irq_restore(flags);
3526
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003527 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003528 kfree_skb(skb);
3529 return NET_RX_DROP;
3530}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003531
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003532static int netif_rx_internal(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003534 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535
Eric Dumazet588f0332011-11-15 04:12:55 +00003536 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537
Koki Sanagicf66ba52010-08-23 18:45:02 +09003538 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003539#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003540 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003541 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003542 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543
Changli Gaocece1942010-08-07 20:35:43 -07003544 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003545 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003546
3547 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003548 if (cpu < 0)
3549 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003550
3551 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3552
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003553 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003554 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003555 } else
3556#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003557 {
3558 unsigned int qtail;
3559 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3560 put_cpu();
3561 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003562 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003564
3565/**
3566 * netif_rx - post buffer to the network code
3567 * @skb: buffer to post
3568 *
3569 * This function receives a packet from a device driver and queues it for
3570 * the upper (protocol) levels to process. It always succeeds. The buffer
3571 * may be dropped during processing for congestion control or by the
3572 * protocol layers.
3573 *
3574 * return values:
3575 * NET_RX_SUCCESS (no congestion)
3576 * NET_RX_DROP (packet was dropped)
3577 *
3578 */
3579
3580int netif_rx(struct sk_buff *skb)
3581{
3582 trace_netif_rx_entry(skb);
3583
3584 return netif_rx_internal(skb);
3585}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003586EXPORT_SYMBOL(netif_rx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587
3588int netif_rx_ni(struct sk_buff *skb)
3589{
3590 int err;
3591
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003592 trace_netif_rx_ni_entry(skb);
3593
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594 preempt_disable();
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003595 err = netif_rx_internal(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596 if (local_softirq_pending())
3597 do_softirq();
3598 preempt_enable();
3599
3600 return err;
3601}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602EXPORT_SYMBOL(netif_rx_ni);
3603
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604static void net_tx_action(struct softirq_action *h)
3605{
Christoph Lameter903ceff2014-08-17 12:30:35 -05003606 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607
3608 if (sd->completion_queue) {
3609 struct sk_buff *clist;
3610
3611 local_irq_disable();
3612 clist = sd->completion_queue;
3613 sd->completion_queue = NULL;
3614 local_irq_enable();
3615
3616 while (clist) {
3617 struct sk_buff *skb = clist;
3618 clist = clist->next;
3619
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003620 WARN_ON(atomic_read(&skb->users));
Eric Dumazete6247022013-12-05 04:45:08 -08003621 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3622 trace_consume_skb(skb);
3623 else
3624 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 __kfree_skb(skb);
3626 }
3627 }
3628
3629 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003630 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631
3632 local_irq_disable();
3633 head = sd->output_queue;
3634 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003635 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636 local_irq_enable();
3637
3638 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003639 struct Qdisc *q = head;
3640 spinlock_t *root_lock;
3641
Linus Torvalds1da177e2005-04-16 15:20:36 -07003642 head = head->next_sched;
3643
David S. Miller5fb66222008-08-02 20:02:43 -07003644 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003645 if (spin_trylock(root_lock)) {
Peter Zijlstra4e857c52014-03-17 18:06:10 +01003646 smp_mb__before_atomic();
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003647 clear_bit(__QDISC_STATE_SCHED,
3648 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003649 qdisc_run(q);
3650 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003652 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003653 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003654 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003655 } else {
Peter Zijlstra4e857c52014-03-17 18:06:10 +01003656 smp_mb__before_atomic();
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003657 clear_bit(__QDISC_STATE_SCHED,
3658 &q->state);
3659 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003660 }
3661 }
3662 }
3663}
3664
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003665#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3666 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003667/* This hook is defined here for ATM LANE */
3668int (*br_fdb_test_addr_hook)(struct net_device *dev,
3669 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003670EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003671#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672
Herbert Xuf697c3e2007-10-14 00:38:47 -07003673static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3674 struct packet_type **pt_prev,
3675 int *ret, struct net_device *orig_dev)
3676{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02003677#ifdef CONFIG_NET_CLS_ACT
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003678 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3679 struct tcf_result cl_res;
Eric Dumazet24824a02010-10-02 06:11:55 +00003680
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003681	/* If there's at least one ingress qdisc present somewhere (so
3682	 * we got here via the enabled static key), the remaining devices
3683	 * that are not configured with an ingress qdisc will bail
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003684	 * out here.
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003685 */
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003686 if (!cl)
Daniel Borkmann45771392015-04-10 23:07:54 +02003687 return skb;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003688 if (*pt_prev) {
3689 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3690 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003691 }
3692
Florian Westphal33654952015-05-14 00:36:28 +02003693 qdisc_skb_cb(skb)->pkt_len = skb->len;
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003694 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
Eric Dumazet24ea5912015-07-06 05:18:03 -07003695 qdisc_bstats_cpu_update(cl->q, skb);
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02003696
Daniel Borkmann3b3ae882015-08-26 23:00:06 +02003697 switch (tc_classify(skb, cl, &cl_res, false)) {
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003698 case TC_ACT_OK:
3699 case TC_ACT_RECLASSIFY:
3700 skb->tc_index = TC_H_MIN(cl_res.classid);
3701 break;
3702 case TC_ACT_SHOT:
Eric Dumazet24ea5912015-07-06 05:18:03 -07003703 qdisc_qstats_cpu_drop(cl->q);
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003704 case TC_ACT_STOLEN:
3705 case TC_ACT_QUEUED:
3706 kfree_skb(skb);
3707 return NULL;
Alexei Starovoitov27b29f62015-09-15 23:05:43 -07003708 case TC_ACT_REDIRECT:
3709 /* skb_mac_header check was done by cls/act_bpf, so
3710 * we can safely push the L2 header back before
3711 * redirecting to another netdev
3712 */
3713 __skb_push(skb, skb->mac_len);
3714 skb_do_redirect(skb);
3715 return NULL;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02003716 default:
3717 break;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003718 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02003719#endif /* CONFIG_NET_CLS_ACT */
Herbert Xuf697c3e2007-10-14 00:38:47 -07003720 return skb;
3721}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003723/**
3724 * netdev_rx_handler_register - register receive handler
3725 * @dev: device to register a handler for
3726 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003727 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003728 *
Masanari Iidae2278672014-02-18 22:54:36 +09003729 * Register a receive handler for a device. This handler will then be
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003730 * called from __netif_receive_skb. A negative errno code is returned
3731 * on a failure.
3732 *
3733 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003734 *
3735 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003736 */
3737int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003738 rx_handler_func_t *rx_handler,
3739 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003740{
3741 ASSERT_RTNL();
3742
3743 if (dev->rx_handler)
3744 return -EBUSY;
3745
Eric Dumazet00cfec32013-03-29 03:01:22 +00003746 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00003747 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003748 rcu_assign_pointer(dev->rx_handler, rx_handler);
3749
3750 return 0;
3751}
3752EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3753
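/* Example (hypothetical sketch): a minimal rx_handler in the style of
 * the bridge/bonding/macvlan port handlers. It runs under
 * rcu_read_lock() from __netif_receive_skb, and the upper device here
 * is the rx_handler_data registered alongside it, e.g. with
 * rtnl_lock(); netdev_rx_handler_register(port, sketch_handle_frame, upper);
 * rtnl_unlock();
 */
static rx_handler_result_t sketch_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct net_device *upper = rcu_dereference(skb->dev->rx_handler_data);

        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
                return RX_HANDLER_PASS;         /* let the stack see it as is */

        skb->dev = upper;                       /* steer into the upper device */
        return RX_HANDLER_ANOTHER;              /* makes the core loop again */
}
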
3754/**
3755 * netdev_rx_handler_unregister - unregister receive handler
3756 * @dev: device to unregister a handler from
3757 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00003758 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003759 *
3760 * The caller must hold the rtnl_mutex.
3761 */
3762void netdev_rx_handler_unregister(struct net_device *dev)
3763{
3764
3765 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003766 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00003767	/* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
3768	 * section is guaranteed to see a non-NULL rx_handler_data
3769 * as well.
3770 */
3771 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003772 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003773}
3774EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3775
Mel Gormanb4b9e352012-07-31 16:44:26 -07003776/*
3777 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3778 * the special handling of PFMEMALLOC skbs.
3779 */
3780static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3781{
3782 switch (skb->protocol) {
Joe Perches2b8837a2014-03-12 10:04:17 -07003783 case htons(ETH_P_ARP):
3784 case htons(ETH_P_IP):
3785 case htons(ETH_P_IPV6):
3786 case htons(ETH_P_8021Q):
3787 case htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07003788 return true;
3789 default:
3790 return false;
3791 }
3792}
3793
Pablo Neirae687ad62015-05-13 18:19:38 +02003794static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
3795 int *ret, struct net_device *orig_dev)
3796{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02003797#ifdef CONFIG_NETFILTER_INGRESS
Pablo Neirae687ad62015-05-13 18:19:38 +02003798 if (nf_hook_ingress_active(skb)) {
3799 if (*pt_prev) {
3800 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3801 *pt_prev = NULL;
3802 }
3803
3804 return nf_hook_ingress(skb);
3805 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02003806#endif /* CONFIG_NETFILTER_INGRESS */
Pablo Neirae687ad62015-05-13 18:19:38 +02003807 return 0;
3808}
Pablo Neirae687ad62015-05-13 18:19:38 +02003809
David S. Miller9754e292013-02-14 15:57:38 -05003810static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811{
3812 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003813 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003814 struct net_device *orig_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003815 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08003817 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818
Eric Dumazet588f0332011-11-15 04:12:55 +00003819 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003820
Koki Sanagicf66ba52010-08-23 18:45:02 +09003821 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003822
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003823 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003824
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003825 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003826 if (!skb_transport_header_was_set(skb))
3827 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003828 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829
3830 pt_prev = NULL;
3831
David S. Miller63d8ea72011-02-28 10:48:59 -08003832another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003833 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003834
3835 __this_cpu_inc(softnet_data.processed);
3836
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003837 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3838 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04003839 skb = skb_vlan_untag(skb);
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003840 if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03003841 goto out;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003842 }
3843
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844#ifdef CONFIG_NET_CLS_ACT
3845 if (skb->tc_verd & TC_NCLS) {
3846 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3847 goto ncls;
3848 }
3849#endif
3850
David S. Miller9754e292013-02-14 15:57:38 -05003851 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003852 goto skip_taps;
3853
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Salam Noureddine7866a622015-01-27 11:35:48 -08003855 if (pt_prev)
3856 ret = deliver_skb(skb, pt_prev, orig_dev);
3857 pt_prev = ptype;
3858 }
3859
3860 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
3861 if (pt_prev)
3862 ret = deliver_skb(skb, pt_prev, orig_dev);
3863 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003864 }
3865
Mel Gormanb4b9e352012-07-31 16:44:26 -07003866skip_taps:
Pablo Neira1cf519002015-05-13 18:19:37 +02003867#ifdef CONFIG_NET_INGRESS
Daniel Borkmann45771392015-04-10 23:07:54 +02003868 if (static_key_false(&ingress_needed)) {
3869 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3870 if (!skb)
Julian Anastasov2c17d272015-07-09 09:59:10 +03003871 goto out;
Pablo Neirae687ad62015-05-13 18:19:38 +02003872
3873 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
Julian Anastasov2c17d272015-07-09 09:59:10 +03003874 goto out;
Daniel Borkmann45771392015-04-10 23:07:54 +02003875 }
Pablo Neira1cf519002015-05-13 18:19:37 +02003876#endif
3877#ifdef CONFIG_NET_CLS_ACT
Daniel Borkmann45771392015-04-10 23:07:54 +02003878 skb->tc_verd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879ncls:
3880#endif
David S. Miller9754e292013-02-14 15:57:38 -05003881 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003882 goto drop;
3883
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01003884 if (skb_vlan_tag_present(skb)) {
John Fastabend24257172011-10-10 09:16:41 +00003885 if (pt_prev) {
3886 ret = deliver_skb(skb, pt_prev, orig_dev);
3887 pt_prev = NULL;
3888 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003889 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003890 goto another_round;
3891 else if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03003892 goto out;
John Fastabend24257172011-10-10 09:16:41 +00003893 }
3894
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003895 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003896 if (rx_handler) {
3897 if (pt_prev) {
3898 ret = deliver_skb(skb, pt_prev, orig_dev);
3899 pt_prev = NULL;
3900 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003901 switch (rx_handler(&skb)) {
3902 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00003903 ret = NET_RX_SUCCESS;
Julian Anastasov2c17d272015-07-09 09:59:10 +03003904 goto out;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003905 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003906 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003907 case RX_HANDLER_EXACT:
3908 deliver_exact = true;
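			/* fall through */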
3909 case RX_HANDLER_PASS:
3910 break;
3911 default:
3912 BUG();
3913 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003914 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003915
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01003916 if (unlikely(skb_vlan_tag_present(skb))) {
3917 if (skb_vlan_tag_get_id(skb))
Eric Dumazetd4b812d2013-07-18 07:19:26 -07003918 skb->pkt_type = PACKET_OTHERHOST;
3919		/* Note: we might in the future use prio bits
3920		 * and set skb->priority like in vlan_do_receive().
3921		 * For the time being, just ignore the Priority Code Point.
3922		 */
3923 skb->vlan_tci = 0;
3924 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003925
Linus Torvalds1da177e2005-04-16 15:20:36 -07003926 type = skb->protocol;
Salam Noureddine7866a622015-01-27 11:35:48 -08003927
3928 /* deliver only exact match when indicated */
3929 if (likely(!deliver_exact)) {
3930 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3931 &ptype_base[ntohs(type) &
3932 PTYPE_HASH_MASK]);
3933 }
3934
3935 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3936 &orig_dev->ptype_specific);
3937
3938 if (unlikely(skb->dev != orig_dev)) {
3939 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3940 &skb->dev->ptype_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941 }
3942
3943 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003944 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003945 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003946 else
3947 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003949drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003950 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951 kfree_skb(skb);
3952		/* Jamal, now you will not be able to escape explaining
3953		 * to me how you were going to use this. :-)
3954 */
3955 ret = NET_RX_DROP;
3956 }
3957
Julian Anastasov2c17d272015-07-09 09:59:10 +03003958out:
David S. Miller9754e292013-02-14 15:57:38 -05003959 return ret;
3960}
3961
3962static int __netif_receive_skb(struct sk_buff *skb)
3963{
3964 int ret;
3965
3966 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3967 unsigned long pflags = current->flags;
3968
3969 /*
3970 * PFMEMALLOC skbs are special, they should
3971 * - be delivered to SOCK_MEMALLOC sockets only
3972 * - stay away from userspace
3973 * - have bounded memory usage
3974 *
3975 * Use PF_MEMALLOC as this saves us from propagating the allocation
3976 * context down to all allocation sites.
3977 */
3978 current->flags |= PF_MEMALLOC;
3979 ret = __netif_receive_skb_core(skb, true);
3980 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3981 } else
3982 ret = __netif_receive_skb_core(skb, false);
3983
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 return ret;
3985}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003986
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003987static int netif_receive_skb_internal(struct sk_buff *skb)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003988{
Julian Anastasov2c17d272015-07-09 09:59:10 +03003989 int ret;
3990
Eric Dumazet588f0332011-11-15 04:12:55 +00003991 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003992
Richard Cochranc1f19b52010-07-17 08:49:36 +00003993 if (skb_defer_rx_timestamp(skb))
3994 return NET_RX_SUCCESS;
3995
Julian Anastasov2c17d272015-07-09 09:59:10 +03003996 rcu_read_lock();
3997
Eric Dumazetdf334542010-03-24 19:13:54 +00003998#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003999 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07004000 struct rps_dev_flow voidflow, *rflow = &voidflow;
Julian Anastasov2c17d272015-07-09 09:59:10 +03004001 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07004002
Eric Dumazet3b098e22010-05-15 23:57:10 -07004003 if (cpu >= 0) {
4004 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4005 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00004006 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07004007 }
Tom Herbertfec5e652010-04-16 16:01:27 -07004008 }
Tom Herbert1e94d722010-03-18 17:45:44 -07004009#endif
Julian Anastasov2c17d272015-07-09 09:59:10 +03004010 ret = __netif_receive_skb(skb);
4011 rcu_read_unlock();
4012 return ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004013}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004014
4015/**
4016 * netif_receive_skb - process receive buffer from network
4017 * @skb: buffer to process
4018 *
4019 * netif_receive_skb() is the main receive data processing function.
4020 * It always succeeds. The buffer may be dropped during processing
4021 * for congestion control or by the protocol layers.
4022 *
4023 * This function may only be called from softirq context and interrupts
4024 * should be enabled.
4025 *
4026 * Return values (usually ignored):
4027 * NET_RX_SUCCESS: no congestion
4028 * NET_RX_DROP: packet was dropped
4029 */
Eric W. Biederman04eb4482015-09-15 20:04:15 -05004030int netif_receive_skb(struct sk_buff *skb)
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004031{
4032 trace_netif_receive_skb_entry(skb);
4033
4034 return netif_receive_skb_internal(skb);
4035}
Eric W. Biederman04eb4482015-09-15 20:04:15 -05004036EXPORT_SYMBOL(netif_receive_skb);
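
/* Illustrative sketch (hypothetical driver code): the classic receive
 * path feeding netif_receive_skb() from softirq context.
 * mydrv_build_skb stands in for however a driver materializes an skb
 * from its RX ring; eth_type_trans() sets skb->protocol and skb->dev.
 */
#if 0	/* example only */
static void mydrv_rx_one(struct net_device *dev, void *buf, unsigned int len)
{
	struct sk_buff *skb = mydrv_build_skb(dev, buf, len);

	if (!skb)
		return;		/* allocation failure: count a drop */

	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);	/* return value usually ignored */
}
#endif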
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037
Eric Dumazet88751272010-04-19 05:07:33 +00004038/* Network device is going away, flush any packets still pending
4039 * Called with irqs disabled.
4040 */
Changli Gao152102c2010-03-30 20:16:22 +00004041static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004042{
Changli Gao152102c2010-03-30 20:16:22 +00004043 struct net_device *dev = arg;
Christoph Lameter903ceff2014-08-17 12:30:35 -05004044 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004045 struct sk_buff *skb, *tmp;
4046
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004047 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004048 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004049 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004050 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004051 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004052 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004053 }
Changli Gao6e7676c2010-04-27 15:07:33 -07004054 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004055 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004056
4057 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
4058 if (skb->dev == dev) {
4059 __skb_unlink(skb, &sd->process_queue);
4060 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004061 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07004062 }
4063 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004064}
4065
Herbert Xud565b0a2008-12-15 23:38:52 -08004066static int napi_gro_complete(struct sk_buff *skb)
4067{
Vlad Yasevich22061d82012-11-15 08:49:11 +00004068 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004069 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004070 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08004071 int err = -ENOENT;
4072
Eric Dumazetc3c7c252012-12-06 13:54:59 +00004073 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4074
Herbert Xufc59f9a2009-04-14 15:11:06 -07004075 if (NAPI_GRO_CB(skb)->count == 1) {
4076 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004077 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07004078 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004079
4080 rcu_read_lock();
4081 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004082 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08004083 continue;
4084
Jerry Chu299603e82013-12-11 20:53:45 -08004085 err = ptype->callbacks.gro_complete(skb, 0);
Herbert Xud565b0a2008-12-15 23:38:52 -08004086 break;
4087 }
4088 rcu_read_unlock();
4089
4090 if (err) {
4091 WARN_ON(&ptype->list == head);
4092 kfree_skb(skb);
4093 return NET_RX_SUCCESS;
4094 }
4095
4096out:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004097 return netif_receive_skb_internal(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004098}
4099
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004100/* napi->gro_list contains packets ordered by age.
4101 * The youngest packets sit at its head.
4102 * Complete skbs in reverse order to reduce latencies.
4103 */
4104void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08004105{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004106 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004107
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004108 /* scan list and build reverse chain */
4109 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4110 skb->prev = prev;
4111 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08004112 }
4113
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004114 for (skb = prev; skb; skb = prev) {
4115 skb->next = NULL;
4116
4117 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4118 return;
4119
4120 prev = skb->prev;
4121 napi_gro_complete(skb);
4122 napi->gro_count--;
4123 }
4124
Herbert Xud565b0a2008-12-15 23:38:52 -08004125 napi->gro_list = NULL;
4126}
Eric Dumazet86cac582010-08-31 18:25:32 +00004127EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08004128
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004129static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4130{
4131 struct sk_buff *p;
4132 unsigned int maclen = skb->dev->hard_header_len;
Tom Herbert0b4cec82014-01-15 08:58:06 -08004133 u32 hash = skb_get_hash_raw(skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004134
4135 for (p = napi->gro_list; p; p = p->next) {
4136 unsigned long diffs;
4137
Tom Herbert0b4cec82014-01-15 08:58:06 -08004138 NAPI_GRO_CB(p)->flush = 0;
4139
4140 if (hash != skb_get_hash_raw(p)) {
4141 NAPI_GRO_CB(p)->same_flow = 0;
4142 continue;
4143 }
4144
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004145 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4146 diffs |= p->vlan_tci ^ skb->vlan_tci;
4147 if (maclen == ETH_HLEN)
4148 diffs |= compare_ether_header(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07004149 skb_mac_header(skb));
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004150 else if (!diffs)
4151 diffs = memcmp(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07004152 skb_mac_header(skb),
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004153 maclen);
4154 NAPI_GRO_CB(p)->same_flow = !diffs;
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004155 }
4156}
4157
Jerry Chu299603e82013-12-11 20:53:45 -08004158static void skb_gro_reset_offset(struct sk_buff *skb)
4159{
4160 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4161 const skb_frag_t *frag0 = &pinfo->frags[0];
4162
4163 NAPI_GRO_CB(skb)->data_offset = 0;
4164 NAPI_GRO_CB(skb)->frag0 = NULL;
4165 NAPI_GRO_CB(skb)->frag0_len = 0;
4166
4167 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4168 pinfo->nr_frags &&
4169 !PageHighMem(skb_frag_page(frag0))) {
4170 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4171 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xud565b0a2008-12-15 23:38:52 -08004172 }
4173}
4174
Eric Dumazeta50e2332014-03-29 21:28:21 -07004175static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4176{
4177 struct skb_shared_info *pinfo = skb_shinfo(skb);
4178
4179 BUG_ON(skb->end - skb->tail < grow);
4180
4181 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4182
4183 skb->data_len -= grow;
4184 skb->tail += grow;
4185
4186 pinfo->frags[0].page_offset += grow;
4187 skb_frag_size_sub(&pinfo->frags[0], grow);
4188
4189 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4190 skb_frag_unref(skb, 0);
4191 memmove(pinfo->frags, pinfo->frags + 1,
4192 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4193 }
4194}
4195
Rami Rosenbb728822012-11-28 21:55:25 +00004196static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08004197{
4198 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004199 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004200 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004201 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004202 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004203 enum gro_result ret;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004204 int grow;
Herbert Xud565b0a2008-12-15 23:38:52 -08004205
Eric W. Biederman9c62a682014-03-14 20:51:52 -07004206 if (!(skb->dev->features & NETIF_F_GRO))
Herbert Xud565b0a2008-12-15 23:38:52 -08004207 goto normal;
4208
Tom Herbert5a212322014-08-31 15:12:41 -07004209 if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
Herbert Xuf17f5c92009-01-14 14:36:12 -08004210 goto normal;
4211
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004212 gro_list_prepare(napi, skb);
4213
Herbert Xud565b0a2008-12-15 23:38:52 -08004214 rcu_read_lock();
4215 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004216 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08004217 continue;
4218
Herbert Xu86911732009-01-29 14:19:50 +00004219 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00004220 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004221 NAPI_GRO_CB(skb)->same_flow = 0;
4222 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08004223 NAPI_GRO_CB(skb)->free = 0;
Or Gerlitzb582ef02014-01-20 13:59:19 +02004224 NAPI_GRO_CB(skb)->udp_mark = 0;
Tom Herbert15e23962015-02-10 16:30:31 -08004225 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004226
Tom Herbert662880f2014-08-27 21:26:56 -07004227 /* Setup for GRO checksum validation */
4228 switch (skb->ip_summed) {
4229 case CHECKSUM_COMPLETE:
4230 NAPI_GRO_CB(skb)->csum = skb->csum;
4231 NAPI_GRO_CB(skb)->csum_valid = 1;
4232 NAPI_GRO_CB(skb)->csum_cnt = 0;
4233 break;
4234 case CHECKSUM_UNNECESSARY:
4235 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4236 NAPI_GRO_CB(skb)->csum_valid = 0;
4237 break;
4238 default:
4239 NAPI_GRO_CB(skb)->csum_cnt = 0;
4240 NAPI_GRO_CB(skb)->csum_valid = 0;
4241 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004242
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004243 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004244 break;
4245 }
4246 rcu_read_unlock();
4247
4248 if (&ptype->list == head)
4249 goto normal;
4250
Herbert Xu0da2afd52008-12-26 14:57:42 -08004251 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004252 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004253
Herbert Xud565b0a2008-12-15 23:38:52 -08004254 if (pp) {
4255 struct sk_buff *nskb = *pp;
4256
4257 *pp = nskb->next;
4258 nskb->next = NULL;
4259 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00004260 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08004261 }
4262
Herbert Xu0da2afd52008-12-26 14:57:42 -08004263 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08004264 goto ok;
4265
Eric Dumazet600adc12014-01-09 14:12:19 -08004266 if (NAPI_GRO_CB(skb)->flush)
Herbert Xud565b0a2008-12-15 23:38:52 -08004267 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08004268
Eric Dumazet600adc12014-01-09 14:12:19 -08004269 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4270 struct sk_buff *nskb = napi->gro_list;
4271
4272 /* locate the end of the list to select the 'oldest' flow */
4273 while (nskb->next) {
4274 pp = &nskb->next;
4275 nskb = *pp;
4276 }
4277 *pp = NULL;
4278 nskb->next = NULL;
4279 napi_gro_complete(nskb);
4280 } else {
4281 napi->gro_count++;
4282 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004283 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004284 NAPI_GRO_CB(skb)->age = jiffies;
Eric Dumazet29e98242014-05-16 11:34:37 -07004285 NAPI_GRO_CB(skb)->last = skb;
Herbert Xu86911732009-01-29 14:19:50 +00004286 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004287 skb->next = napi->gro_list;
4288 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004289 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08004290
Herbert Xuad0f9902009-02-01 01:24:55 -08004291pull:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004292 grow = skb_gro_offset(skb) - skb_headlen(skb);
4293 if (grow > 0)
4294 gro_pull_from_frag0(skb, grow);
Herbert Xud565b0a2008-12-15 23:38:52 -08004295ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004296 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08004297
4298normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08004299 ret = GRO_NORMAL;
4300 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08004301}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004302
Jerry Chubf5a7552014-01-07 10:23:19 -08004303struct packet_offload *gro_find_receive_by_type(__be16 type)
4304{
4305 struct list_head *offload_head = &offload_base;
4306 struct packet_offload *ptype;
4307
4308 list_for_each_entry_rcu(ptype, offload_head, list) {
4309 if (ptype->type != type || !ptype->callbacks.gro_receive)
4310 continue;
4311 return ptype;
4312 }
4313 return NULL;
4314}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004315EXPORT_SYMBOL(gro_find_receive_by_type);
Jerry Chubf5a7552014-01-07 10:23:19 -08004316
4317struct packet_offload *gro_find_complete_by_type(__be16 type)
4318{
4319 struct list_head *offload_head = &offload_base;
4320 struct packet_offload *ptype;
4321
4322 list_for_each_entry_rcu(ptype, offload_head, list) {
4323 if (ptype->type != type || !ptype->callbacks.gro_complete)
4324 continue;
4325 return ptype;
4326 }
4327 return NULL;
4328}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004329EXPORT_SYMBOL(gro_find_complete_by_type);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004330
Rami Rosenbb728822012-11-28 21:55:25 +00004331static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08004332{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004333 switch (ret) {
4334 case GRO_NORMAL:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004335 if (netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004336 ret = GRO_DROP;
4337 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004338
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004339 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08004340 kfree_skb(skb);
4341 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004342
Eric Dumazetdaa86542012-04-19 07:07:40 +00004343 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00004344 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4345 kmem_cache_free(skbuff_head_cache, skb);
4346 else
4347 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00004348 break;
4349
Ben Hutchings5b252f02009-10-29 07:17:09 +00004350 case GRO_HELD:
4351 case GRO_MERGED:
4352 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004353 }
4354
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004355 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004356}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004357
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004358gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004359{
Eric Dumazet93f93a42015-11-18 06:30:59 -08004360 skb_mark_napi_id(skb, napi);
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004361 trace_napi_gro_receive_entry(skb);
Herbert Xu86911732009-01-29 14:19:50 +00004362
Eric Dumazeta50e2332014-03-29 21:28:21 -07004363 skb_gro_reset_offset(skb);
4364
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004365 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004366}
4367EXPORT_SYMBOL(napi_gro_receive);
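
/* Illustrative sketch: per-packet use of napi_gro_receive() from a
 * NAPI poll loop. mydrv_next_rx_skb is a made-up helper; a real driver
 * would also set checksum state (e.g. CHECKSUM_UNNECESSARY) before
 * handing the skb to GRO so dev_gro_receive() can validate it cheaply.
 */
#if 0	/* example only */
static int mydrv_rx_clean(struct napi_struct *napi, int budget)
{
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = mydrv_next_rx_skb(napi->dev);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);
		done++;
	}

	return done;
}
#endif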
4368
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00004369static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004370{
Eric Dumazet93a35f52014-10-23 06:30:30 -07004371 if (unlikely(skb->pfmemalloc)) {
4372 consume_skb(skb);
4373 return;
4374 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004375 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00004376 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4377 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00004378 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08004379 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08004380 skb->skb_iif = 0;
Jerry Chuc3caf112014-07-14 15:54:46 -07004381 skb->encapsulation = 0;
4382 skb_shinfo(skb)->gso_type = 0;
Eric Dumazete33d0ba2014-04-03 09:28:10 -07004383 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08004384
4385 napi->skb = skb;
4386}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004387
Herbert Xu76620aa2009-04-16 02:02:07 -07004388struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08004389{
Herbert Xu5d38a072009-01-04 16:13:40 -08004390 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004391
4392 if (!skb) {
Alexander Duyckfd11a832014-12-09 19:40:49 -08004393 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
Eric Dumazete2f9dc32015-11-19 12:11:23 -08004394 if (skb) {
4395 napi->skb = skb;
4396 skb_mark_napi_id(skb, napi);
4397 }
Herbert Xu5d38a072009-01-04 16:13:40 -08004398 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004399 return skb;
4400}
Herbert Xu76620aa2009-04-16 02:02:07 -07004401EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004402
Eric Dumazeta50e2332014-03-29 21:28:21 -07004403static gro_result_t napi_frags_finish(struct napi_struct *napi,
4404 struct sk_buff *skb,
4405 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004406{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004407 switch (ret) {
4408 case GRO_NORMAL:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004409 case GRO_HELD:
4410 __skb_push(skb, ETH_HLEN);
4411 skb->protocol = eth_type_trans(skb, skb->dev);
4412 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004413 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00004414 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004415
4416 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004417 case GRO_MERGED_FREE:
4418 napi_reuse_skb(napi, skb);
4419 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004420
4421 case GRO_MERGED:
4422 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004423 }
4424
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004425 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004426}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004427
Eric Dumazeta50e2332014-03-29 21:28:21 -07004428/* The upper GRO stack assumes that the network header starts at gro_offset=0.
4429 * Drivers could call both napi_gro_frags() and napi_gro_receive().
4430 * We copy the Ethernet header into skb->data to have a common layout.
4431 */
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004432static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004433{
Herbert Xu76620aa2009-04-16 02:02:07 -07004434 struct sk_buff *skb = napi->skb;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004435 const struct ethhdr *eth;
4436 unsigned int hlen = sizeof(*eth);
Herbert Xu76620aa2009-04-16 02:02:07 -07004437
4438 napi->skb = NULL;
4439
Eric Dumazeta50e2332014-03-29 21:28:21 -07004440 skb_reset_mac_header(skb);
4441 skb_gro_reset_offset(skb);
4442
4443 eth = skb_gro_header_fast(skb, 0);
4444 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4445 eth = skb_gro_header_slow(skb, hlen, 0);
4446 if (unlikely(!eth)) {
4447 napi_reuse_skb(napi, skb);
4448 return NULL;
4449 }
4450 } else {
4451 gro_pull_from_frag0(skb, hlen);
4452 NAPI_GRO_CB(skb)->frag0 += hlen;
4453 NAPI_GRO_CB(skb)->frag0_len -= hlen;
Herbert Xu76620aa2009-04-16 02:02:07 -07004454 }
Eric Dumazeta50e2332014-03-29 21:28:21 -07004455 __skb_pull(skb, hlen);
4456
4457 /*
4458 * This works because the only protocols we care about don't require
4459 * special handling.
4460 * We'll fix it up properly in napi_frags_finish()
4461 */
4462 skb->protocol = eth->h_proto;
Herbert Xu76620aa2009-04-16 02:02:07 -07004463
Herbert Xu76620aa2009-04-16 02:02:07 -07004464 return skb;
4465}
Herbert Xu76620aa2009-04-16 02:02:07 -07004466
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004467gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004468{
4469 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004470
4471 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004472 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004473
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004474 trace_napi_gro_frags_entry(skb);
4475
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004476 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004477}
4478EXPORT_SYMBOL(napi_gro_frags);
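
/* Illustrative sketch of the frag-based entry point: the driver
 * borrows napi->skb via napi_get_frags(), attaches a page fragment
 * and calls napi_gro_frags(), which parses the Ethernet header itself
 * in napi_frags_skb() above. All mydrv_* names are invented.
 */
#if 0	/* example only */
static void mydrv_rx_frag(struct napi_struct *napi, struct page *page,
			  unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;		/* allocation failure: drop the buffer */

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			PAGE_SIZE);
	napi_gro_frags(napi);	/* consumes or recycles napi->skb */
}
#endif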
4479
Tom Herbert573e8fc2014-08-22 13:33:47 -07004480/* Compute the checksum from gro_offset and return the folded value
4481 * after adding in any pseudo checksum.
4482 */
4483__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4484{
4485 __wsum wsum;
4486 __sum16 sum;
4487
4488 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4489
4490 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4491 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4492 if (likely(!sum)) {
4493 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4494 !skb->csum_complete_sw)
4495 netdev_rx_csum_fault(skb->dev);
4496 }
4497
4498 NAPI_GRO_CB(skb)->csum = wsum;
4499 NAPI_GRO_CB(skb)->csum_valid = 1;
4500
4501 return sum;
4502}
4503EXPORT_SYMBOL(__skb_gro_checksum_complete);
4504
Eric Dumazete326bed2010-04-22 00:22:45 -07004505/*
Zhi Yong Wu855abcf2014-01-01 04:34:50 +08004506 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
Eric Dumazete326bed2010-04-22 00:22:45 -07004507 * Note: called with local irq disabled, but exits with local irq enabled.
4508 */
4509static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4510{
4511#ifdef CONFIG_RPS
4512 struct softnet_data *remsd = sd->rps_ipi_list;
4513
4514 if (remsd) {
4515 sd->rps_ipi_list = NULL;
4516
4517 local_irq_enable();
4518
4519		/* Send pending IPIs to kick RPS processing on remote cpus. */
4520 while (remsd) {
4521 struct softnet_data *next = remsd->rps_ipi_next;
4522
4523 if (cpu_online(remsd->cpu))
Frederic Weisbeckerc46fff22014-02-24 16:40:02 +01004524 smp_call_function_single_async(remsd->cpu,
Frederic Weisbeckerfce8ad12014-02-24 16:40:01 +01004525 &remsd->csd);
Eric Dumazete326bed2010-04-22 00:22:45 -07004526 remsd = next;
4527 }
4528 } else
4529#endif
4530 local_irq_enable();
4531}
4532
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004533static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4534{
4535#ifdef CONFIG_RPS
4536 return sd->rps_ipi_list != NULL;
4537#else
4538 return false;
4539#endif
4540}
4541
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004542static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004543{
4544 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004545 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546
Eric Dumazete326bed2010-04-22 00:22:45 -07004547	/* Check if we have pending IPIs; it's better to send them now
4548	 * than to wait for net_rx_action() to end.
4549 */
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004550 if (sd_has_rps_ipi_waiting(sd)) {
Eric Dumazete326bed2010-04-22 00:22:45 -07004551 local_irq_disable();
4552 net_rps_action_and_irq_enable(sd);
4553 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004554
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004555 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004556 local_irq_disable();
Tom Herbert11ef7a82014-06-30 09:50:40 -07004557 while (1) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004558 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559
Changli Gao6e7676c2010-04-27 15:07:33 -07004560 while ((skb = __skb_dequeue(&sd->process_queue))) {
Julian Anastasov2c17d272015-07-09 09:59:10 +03004561 rcu_read_lock();
Eric Dumazete4008272010-04-05 15:42:39 -07004562 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004563 __netif_receive_skb(skb);
Julian Anastasov2c17d272015-07-09 09:59:10 +03004564 rcu_read_unlock();
Changli Gao6e7676c2010-04-27 15:07:33 -07004565 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004566 input_queue_head_incr(sd);
4567 if (++work >= quota) {
4568 local_irq_enable();
4569 return work;
4570 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004571 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004572
Changli Gao6e7676c2010-04-27 15:07:33 -07004573 rps_lock(sd);
Tom Herbert11ef7a82014-06-30 09:50:40 -07004574 if (skb_queue_empty(&sd->input_pkt_queue)) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004575 /*
4576 * Inline a custom version of __napi_complete().
4577			 * Only the current cpu owns and manipulates this napi,
Tom Herbert11ef7a82014-06-30 09:50:40 -07004578 * and NAPI_STATE_SCHED is the only possible flag set
4579 * on backlog.
4580 * We can use a plain write instead of clear_bit(),
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004581			 * and we don't need an smp_mb() memory barrier.
4582 */
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004583 napi->state = 0;
Tom Herbert11ef7a82014-06-30 09:50:40 -07004584 rps_unlock(sd);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004585
Tom Herbert11ef7a82014-06-30 09:50:40 -07004586 break;
Changli Gao6e7676c2010-04-27 15:07:33 -07004587 }
Tom Herbert11ef7a82014-06-30 09:50:40 -07004588
4589 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4590 &sd->process_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07004591 rps_unlock(sd);
4592 }
4593 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004594
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004595 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596}
4597
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004598/**
4599 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004600 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004601 *
Eric Dumazetbc9ad162014-10-28 18:05:13 -07004602 * The entry's receive function will be scheduled to run.
4603 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004604 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004605void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004606{
4607 unsigned long flags;
4608
4609 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05004610 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004611 local_irq_restore(flags);
4612}
4613EXPORT_SYMBOL(__napi_schedule);
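
/* Illustrative sketch: the usual interrupt-side pattern. The driver
 * quiesces its RX interrupt and defers the work to NAPI;
 * napi_schedule() is the inline wrapper that tests NAPI_STATE_SCHED
 * via napi_schedule_prep() before ending up here. Hypothetical
 * device names.
 */
#if 0	/* example only */
static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	mydrv_disable_rx_irq(priv);	/* quiesce the source */
	napi_schedule(&priv->napi);	/* raises NET_RX_SOFTIRQ */

	return IRQ_HANDLED;
}
#endif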
4614
Eric Dumazetbc9ad162014-10-28 18:05:13 -07004615/**
4616 * __napi_schedule_irqoff - schedule for receive
4617 * @n: entry to schedule
4618 *
4619 * Variant of __napi_schedule() assuming hard irqs are masked
4620 */
4621void __napi_schedule_irqoff(struct napi_struct *n)
4622{
4623 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4624}
4625EXPORT_SYMBOL(__napi_schedule_irqoff);
4626
Herbert Xud565b0a2008-12-15 23:38:52 -08004627void __napi_complete(struct napi_struct *n)
4628{
4629 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
Herbert Xud565b0a2008-12-15 23:38:52 -08004630
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004631 list_del_init(&n->poll_list);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01004632 smp_mb__before_atomic();
Herbert Xud565b0a2008-12-15 23:38:52 -08004633 clear_bit(NAPI_STATE_SCHED, &n->state);
4634}
4635EXPORT_SYMBOL(__napi_complete);
4636
Eric Dumazet3b47d302014-11-06 21:09:44 -08004637void napi_complete_done(struct napi_struct *n, int work_done)
Herbert Xud565b0a2008-12-15 23:38:52 -08004638{
4639 unsigned long flags;
4640
4641 /*
4642 * don't let napi dequeue from the cpu poll list
4643	 * just in case it's running on a different cpu
4644 */
4645 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4646 return;
4647
Eric Dumazet3b47d302014-11-06 21:09:44 -08004648 if (n->gro_list) {
4649 unsigned long timeout = 0;
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004650
Eric Dumazet3b47d302014-11-06 21:09:44 -08004651 if (work_done)
4652 timeout = n->dev->gro_flush_timeout;
4653
4654 if (timeout)
4655 hrtimer_start(&n->timer, ns_to_ktime(timeout),
4656 HRTIMER_MODE_REL_PINNED);
4657 else
4658 napi_gro_flush(n, false);
4659 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004660 if (likely(list_empty(&n->poll_list))) {
4661 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4662 } else {
4663 /* If n->poll_list is not empty, we need to mask irqs */
4664 local_irq_save(flags);
4665 __napi_complete(n);
4666 local_irq_restore(flags);
4667 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004668}
Eric Dumazet3b47d302014-11-06 21:09:44 -08004669EXPORT_SYMBOL(napi_complete_done);
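
/* Illustrative sketch of the poll-side contract: complete only when
 * less than the full budget was consumed, passing work_done so a
 * non-zero gro_flush_timeout can defer the GRO flush via the hrtimer
 * armed above. mydrv_* names are invented (mydrv_rx_clean as sketched
 * earlier).
 */
#if 0	/* example only */
static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int work_done = mydrv_rx_clean(napi, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		mydrv_enable_rx_irq(priv);	/* re-arm the interrupt */
	}

	return work_done;
}
#endif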
Herbert Xud565b0a2008-12-15 23:38:52 -08004670
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004671/* must be called under rcu_read_lock(), as we don't take a reference */
Eric Dumazet02d62e82015-11-18 06:30:52 -08004672static struct napi_struct *napi_by_id(unsigned int napi_id)
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004673{
4674 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4675 struct napi_struct *napi;
4676
4677 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4678 if (napi->napi_id == napi_id)
4679 return napi;
4680
4681 return NULL;
4682}
Eric Dumazet02d62e82015-11-18 06:30:52 -08004683
4684#if defined(CONFIG_NET_RX_BUSY_POLL)
Eric Dumazetce6aea92015-11-18 06:30:54 -08004685#define BUSY_POLL_BUDGET 8
Eric Dumazet02d62e82015-11-18 06:30:52 -08004686bool sk_busy_loop(struct sock *sk, int nonblock)
4687{
4688 unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
Eric Dumazetce6aea92015-11-18 06:30:54 -08004689 int (*busy_poll)(struct napi_struct *dev);
Eric Dumazet02d62e82015-11-18 06:30:52 -08004690 struct napi_struct *napi;
4691 int rc = false;
4692
Eric Dumazet2a028ec2015-11-18 06:30:53 -08004693 rcu_read_lock();
Eric Dumazet02d62e82015-11-18 06:30:52 -08004694
4695 napi = napi_by_id(sk->sk_napi_id);
4696 if (!napi)
4697 goto out;
4698
Eric Dumazetce6aea92015-11-18 06:30:54 -08004699 /* Note: ndo_busy_poll method is optional in linux-4.5 */
4700 busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
Eric Dumazet02d62e82015-11-18 06:30:52 -08004701
4702 do {
Eric Dumazetce6aea92015-11-18 06:30:54 -08004703 rc = 0;
Eric Dumazet2a028ec2015-11-18 06:30:53 -08004704 local_bh_disable();
Eric Dumazetce6aea92015-11-18 06:30:54 -08004705 if (busy_poll) {
4706 rc = busy_poll(napi);
4707 } else if (napi_schedule_prep(napi)) {
4708 void *have = netpoll_poll_lock(napi);
4709
4710 if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
4711 rc = napi->poll(napi, BUSY_POLL_BUDGET);
4712 trace_napi_poll(napi);
4713 if (rc == BUSY_POLL_BUDGET) {
4714 napi_complete_done(napi, rc);
4715 napi_schedule(napi);
4716 }
4717 }
4718 netpoll_poll_unlock(have);
4719 }
Eric Dumazet2a028ec2015-11-18 06:30:53 -08004720 if (rc > 0)
4721 NET_ADD_STATS_BH(sock_net(sk),
4722 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
4723 local_bh_enable();
Eric Dumazet02d62e82015-11-18 06:30:52 -08004724
4725 if (rc == LL_FLUSH_FAILED)
4726 break; /* permanent failure */
4727
Eric Dumazet02d62e82015-11-18 06:30:52 -08004728 cpu_relax();
Eric Dumazet02d62e82015-11-18 06:30:52 -08004729 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
4730 !need_resched() && !busy_loop_timeout(end_time));
4731
4732 rc = !skb_queue_empty(&sk->sk_receive_queue);
4733out:
Eric Dumazet2a028ec2015-11-18 06:30:53 -08004734 rcu_read_unlock();
Eric Dumazet02d62e82015-11-18 06:30:52 -08004735 return rc;
4736}
4737EXPORT_SYMBOL(sk_busy_loop);
4738
4739#endif /* CONFIG_NET_RX_BUSY_POLL */
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004740
4741void napi_hash_add(struct napi_struct *napi)
4742{
Eric Dumazetd64b5e82015-11-18 06:31:00 -08004743 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
4744 test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
Eric Dumazet52bd2d62015-11-18 06:30:50 -08004745 return;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004746
Eric Dumazet52bd2d62015-11-18 06:30:50 -08004747 spin_lock(&napi_hash_lock);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004748
Eric Dumazet52bd2d62015-11-18 06:30:50 -08004749 /* 0..NR_CPUS+1 range is reserved for sender_cpu use */
4750 do {
4751 if (unlikely(++napi_gen_id < NR_CPUS + 1))
4752 napi_gen_id = NR_CPUS + 1;
4753 } while (napi_by_id(napi_gen_id));
4754 napi->napi_id = napi_gen_id;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004755
Eric Dumazet52bd2d62015-11-18 06:30:50 -08004756 hlist_add_head_rcu(&napi->napi_hash_node,
4757 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004758
Eric Dumazet52bd2d62015-11-18 06:30:50 -08004759 spin_unlock(&napi_hash_lock);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004760}
4761EXPORT_SYMBOL_GPL(napi_hash_add);
4762
4763/* Warning: the caller is responsible for making sure an RCU grace
4764 * period has elapsed before freeing the memory containing @napi.
4765 */
Eric Dumazet34cbe272015-11-18 06:31:02 -08004766bool napi_hash_del(struct napi_struct *napi)
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004767{
Eric Dumazet34cbe272015-11-18 06:31:02 -08004768 bool rcu_sync_needed = false;
4769
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004770 spin_lock(&napi_hash_lock);
4771
Eric Dumazet34cbe272015-11-18 06:31:02 -08004772 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
4773 rcu_sync_needed = true;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004774 hlist_del_rcu(&napi->napi_hash_node);
Eric Dumazet34cbe272015-11-18 06:31:02 -08004775 }
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004776 spin_unlock(&napi_hash_lock);
Eric Dumazet34cbe272015-11-18 06:31:02 -08004777 return rcu_sync_needed;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004778}
4779EXPORT_SYMBOL_GPL(napi_hash_del);
4780
Eric Dumazet3b47d302014-11-06 21:09:44 -08004781static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4782{
4783 struct napi_struct *napi;
4784
4785 napi = container_of(timer, struct napi_struct, timer);
4786 if (napi->gro_list)
4787 napi_schedule(napi);
4788
4789 return HRTIMER_NORESTART;
4790}
4791
Herbert Xud565b0a2008-12-15 23:38:52 -08004792void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4793 int (*poll)(struct napi_struct *, int), int weight)
4794{
4795 INIT_LIST_HEAD(&napi->poll_list);
Eric Dumazet3b47d302014-11-06 21:09:44 -08004796 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
4797 napi->timer.function = napi_watchdog;
Herbert Xu4ae55442009-02-08 18:00:36 +00004798 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004799 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004800 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004801 napi->poll = poll;
Eric Dumazet82dc3c62013-03-05 15:57:22 +00004802 if (weight > NAPI_POLL_WEIGHT)
4803 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4804 weight, dev->name);
Herbert Xud565b0a2008-12-15 23:38:52 -08004805 napi->weight = weight;
4806 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004807 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004808#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004809 spin_lock_init(&napi->poll_lock);
4810 napi->poll_owner = -1;
4811#endif
4812 set_bit(NAPI_STATE_SCHED, &napi->state);
Eric Dumazet93d05d42015-11-18 06:31:03 -08004813 napi_hash_add(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004814}
4815EXPORT_SYMBOL(netif_napi_add);
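
/* Illustrative sketch: registering the NAPI instance at probe time
 * with the default weight and enabling it before traffic can flow;
 * netif_napi_del() below is the matching teardown. Hypothetical
 * structure layout, mydrv_poll as sketched earlier.
 */
#if 0	/* example only */
static void mydrv_setup_napi(struct net_device *dev, struct mydrv_priv *priv)
{
	netif_napi_add(dev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);	/* clears NAPI_STATE_SCHED */
}
#endif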
4816
Eric Dumazet3b47d302014-11-06 21:09:44 -08004817void napi_disable(struct napi_struct *n)
4818{
4819 might_sleep();
4820 set_bit(NAPI_STATE_DISABLE, &n->state);
4821
4822 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4823 msleep(1);
Neil Horman2d8bff12015-09-23 14:57:58 -04004824 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
4825 msleep(1);
Eric Dumazet3b47d302014-11-06 21:09:44 -08004826
4827 hrtimer_cancel(&n->timer);
4828
4829 clear_bit(NAPI_STATE_DISABLE, &n->state);
4830}
4831EXPORT_SYMBOL(napi_disable);
4832
Eric Dumazet93d05d42015-11-18 06:31:03 -08004833/* Must be called in process context */
Herbert Xud565b0a2008-12-15 23:38:52 -08004834void netif_napi_del(struct napi_struct *napi)
4835{
Eric Dumazet93d05d42015-11-18 06:31:03 -08004836 might_sleep();
4837 if (napi_hash_del(napi))
4838 synchronize_net();
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004839 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004840 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004841
Eric Dumazet289dccb2013-12-20 14:29:08 -08004842 kfree_skb_list(napi->gro_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004843 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004844 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004845}
4846EXPORT_SYMBOL(netif_napi_del);
4847
Herbert Xu726ce702014-12-21 07:16:21 +11004848static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4849{
4850 void *have;
4851 int work, weight;
4852
4853 list_del_init(&n->poll_list);
4854
4855 have = netpoll_poll_lock(n);
4856
4857 weight = n->weight;
4858
4859 /* This NAPI_STATE_SCHED test is for avoiding a race
4860 * with netpoll's poll_napi(). Only the entity which
4861 * obtains the lock and sees NAPI_STATE_SCHED set will
4862 * actually make the ->poll() call. Therefore we avoid
4863 * accidentally calling ->poll() when NAPI is not scheduled.
4864 */
4865 work = 0;
4866 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4867 work = n->poll(n, weight);
4868 trace_napi_poll(n);
4869 }
4870
4871 WARN_ON_ONCE(work > weight);
4872
4873 if (likely(work < weight))
4874 goto out_unlock;
4875
4876 /* Drivers must not modify the NAPI state if they
4877 * consume the entire weight. In such cases this code
4878 * still "owns" the NAPI instance and therefore can
4879 * move the instance around on the list at-will.
4880 */
4881 if (unlikely(napi_disable_pending(n))) {
4882 napi_complete(n);
4883 goto out_unlock;
4884 }
4885
4886 if (n->gro_list) {
4887		/* Flush too old packets.
4888 * If HZ < 1000, flush all packets.
4889 */
4890 napi_gro_flush(n, HZ >= 1000);
4891 }
4892
Herbert Xu001ce542014-12-21 07:16:22 +11004893 /* Some drivers may have called napi_schedule
4894 * prior to exhausting their budget.
4895 */
4896 if (unlikely(!list_empty(&n->poll_list))) {
4897 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
4898 n->dev ? n->dev->name : "backlog");
4899 goto out_unlock;
4900 }
4901
Herbert Xu726ce702014-12-21 07:16:21 +11004902 list_add_tail(&n->poll_list, repoll);
4903
4904out_unlock:
4905 netpoll_poll_unlock(have);
4906
4907 return work;
4908}
4909
Linus Torvalds1da177e2005-04-16 15:20:36 -07004910static void net_rx_action(struct softirq_action *h)
4911{
Christoph Lameter903ceff2014-08-17 12:30:35 -05004912 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004913 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004914 int budget = netdev_budget;
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004915 LIST_HEAD(list);
4916 LIST_HEAD(repoll);
Matt Mackall53fb95d2005-08-11 19:27:43 -07004917
Linus Torvalds1da177e2005-04-16 15:20:36 -07004918 local_irq_disable();
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004919 list_splice_init(&sd->poll_list, &list);
4920 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921
Herbert Xuceb8d5b2014-12-21 07:16:25 +11004922 for (;;) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004923 struct napi_struct *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924
Herbert Xuceb8d5b2014-12-21 07:16:25 +11004925 if (list_empty(&list)) {
4926 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
4927 return;
4928 break;
4929 }
4930
Herbert Xu6bd373e2014-12-21 07:16:24 +11004931 n = list_first_entry(&list, struct napi_struct, poll_list);
4932 budget -= napi_poll(n, &repoll);
4933
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004934 /* If softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004935		 * Allow this to run for 2 jiffies, which allows
4936		 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004937 */
Herbert Xuceb8d5b2014-12-21 07:16:25 +11004938 if (unlikely(budget <= 0 ||
4939 time_after_eq(jiffies, time_limit))) {
4940 sd->time_squeeze++;
4941 break;
4942 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004943 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004944
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004945 local_irq_disable();
4946
4947 list_splice_tail_init(&sd->poll_list, &list);
4948 list_splice_tail(&repoll, &list);
4949 list_splice(&list, &sd->poll_list);
4950 if (!list_empty(&sd->poll_list))
4951 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4952
Eric Dumazete326bed2010-04-22 00:22:45 -07004953 net_rps_action_and_irq_enable(sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004954}
4955
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004956struct netdev_adjacent {
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004957 struct net_device *dev;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004958
4959 /* upper master flag, there can only be one master device per list */
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004960 bool master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004961
Veaceslav Falico5d261912013-08-28 23:25:05 +02004962 /* counter for the number of times this device was added to us */
4963 u16 ref_nr;
4964
Veaceslav Falico402dae92013-09-25 09:20:09 +02004965 /* private field for the users */
4966 void *private;
4967
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004968 struct list_head list;
4969 struct rcu_head rcu;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004970};
4971
Michal Kubeček6ea29da2015-09-24 10:59:05 +02004972static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004973 struct list_head *adj_list)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004974{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004975 struct netdev_adjacent *adj;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004976
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004977 list_for_each_entry(adj, adj_list, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004978 if (adj->dev == adj_dev)
4979 return adj;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004980 }
4981 return NULL;
4982}
4983
4984/**
4985 * netdev_has_upper_dev - Check if device is linked to an upper device
4986 * @dev: device
4987 * @upper_dev: upper device to check
4988 *
4989 * Find out if a device is linked to the specified upper device and return
4990 * true in case it is. Note that this checks the complete chain of upper
4991 * devices, not only the immediate upper device. The caller must hold the RTNL lock.
4992 */
4993bool netdev_has_upper_dev(struct net_device *dev,
4994 struct net_device *upper_dev)
4995{
4996 ASSERT_RTNL();
4997
Michal Kubeček6ea29da2015-09-24 10:59:05 +02004998 return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004999}
5000EXPORT_SYMBOL(netdev_has_upper_dev);
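
/* Illustrative sketch: an RTNL-protected topology check, e.g. from a
 * NETDEV_CHANGEUPPER notifier. The combination below asks whether
 * "bond" sits somewhere above "dev" and whether it is also dev's
 * direct master. Hypothetical caller.
 */
#if 0	/* example only */
static bool mydrv_is_under_bond(struct net_device *dev,
				struct net_device *bond)
{
	ASSERT_RTNL();

	if (!netdev_has_upper_dev(dev, bond))
		return false;

	return netdev_master_upper_dev_get(dev) == bond;
}
#endif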
5001
5002/**
5003 * netdev_has_any_upper_dev - Check if device is linked to some device
5004 * @dev: device
5005 *
5006 * Find out if a device is linked to an upper device and return true in case
5007 * it is. The caller must hold the RTNL lock.
5008 */
stephen hemminger1d143d92013-12-29 14:01:29 -08005009static bool netdev_has_any_upper_dev(struct net_device *dev)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005010{
5011 ASSERT_RTNL();
5012
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005013 return !list_empty(&dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005014}
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005015
5016/**
5017 * netdev_master_upper_dev_get - Get master upper device
5018 * @dev: device
5019 *
5020 * Find a master upper device and return pointer to it or NULL in case
5021 * it's not there. The caller must hold the RTNL lock.
5022 */
5023struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
5024{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02005025 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005026
5027 ASSERT_RTNL();
5028
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005029 if (list_empty(&dev->adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005030 return NULL;
5031
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005032 upper = list_first_entry(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02005033 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005034 if (likely(upper->master))
5035 return upper->dev;
5036 return NULL;
5037}
5038EXPORT_SYMBOL(netdev_master_upper_dev_get);
5039
Veaceslav Falicob6ccba42013-09-25 09:20:23 +02005040void *netdev_adjacent_get_private(struct list_head *adj_list)
5041{
5042 struct netdev_adjacent *adj;
5043
5044 adj = list_entry(adj_list, struct netdev_adjacent, list);
5045
5046 return adj->private;
5047}
5048EXPORT_SYMBOL(netdev_adjacent_get_private);
5049
Veaceslav Falico31088a12013-09-25 09:20:12 +02005050/**
Vlad Yasevich44a40852014-05-16 17:20:38 -04005051 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
5052 * @dev: device
5053 * @iter: list_head ** of the current position
5054 *
5055 * Gets the next device from the dev's upper list, starting from iter
5056 * position. The caller must hold RCU read lock.
5057 */
5058struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5059 struct list_head **iter)
5060{
5061 struct netdev_adjacent *upper;
5062
5063 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5064
5065 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5066
5067 if (&upper->list == &dev->adj_list.upper)
5068 return NULL;
5069
5070 *iter = &upper->list;
5071
5072 return upper->dev;
5073}
5074EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
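
/* Illustrative sketch: walking the direct upper devices under RCU with
 * the iterator above; the iterator starts at the list head and the
 * helper returns NULL once the list wraps around. netdevice.h wraps
 * the all_adj_list flavour of this pattern in
 * netdev_for_each_all_upper_dev_rcu().
 */
#if 0	/* example only */
static void mydrv_dump_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter = &dev->adj_list.upper;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
		pr_info("%s is below %s\n", dev->name, upper->name);
	rcu_read_unlock();
}
#endif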
5075
5076/**
Veaceslav Falico31088a12013-09-25 09:20:12 +02005077 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
Veaceslav Falico48311f42013-08-28 23:25:07 +02005078 * @dev: device
5079 * @iter: list_head ** of the current position
5080 *
5081 * Gets the next device from the dev's upper list, starting from iter
5082 * position. The caller must hold RCU read lock.
5083 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005084struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
5085 struct list_head **iter)
Veaceslav Falico48311f42013-08-28 23:25:07 +02005086{
5087 struct netdev_adjacent *upper;
5088
John Fastabend85328242013-11-26 06:33:52 +00005089 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
Veaceslav Falico48311f42013-08-28 23:25:07 +02005090
5091 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5092
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005093 if (&upper->list == &dev->all_adj_list.upper)
Veaceslav Falico48311f42013-08-28 23:25:07 +02005094 return NULL;
5095
5096 *iter = &upper->list;
5097
5098 return upper->dev;
5099}
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005100EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU
 *				       variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
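
/* Illustrative sketch, not part of the original file: counting the
 * per-lower-device ->private entries under RCU, the way a bonding-like
 * master reaches its per-slave state. In-tree callers usually use the
 * netdev_for_each_lower_private_rcu() macro built on the function above.
 * The example_* name is hypothetical.
 */
static __maybe_unused int example_count_lower_privates(struct net_device *dev)
{
	struct list_head *iter;
	void *priv;
	int n = 0;

	rcu_read_lock();
	netdev_for_each_lower_private_rcu(dev, priv, iter)
		n++;
	rcu_read_unlock();

	return n;
}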

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU
 *					variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
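
/* Illustrative sketch, not part of the original file: querying the master
 * from a packet-processing context where only the RCU read lock, not
 * RTNL, is held. The example_* name is hypothetical.
 */
static __maybe_unused bool example_skb_dev_is_enslaved(const struct sk_buff *skb)
{
	bool enslaved;

	rcu_read_lock();
	enslaved = netdev_master_upper_dev_get_rcu(skb->dev) != NULL;
	rcu_read_unlock();

	return enslaved;
}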

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];
	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}
static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];
	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr++;
		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("dev_hold for %s, because of link added from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("tried to remove device %s from %s\n",
		       dev->name, adj_dev->name);
		BUG();
	}

	if (adj->ref_nr > 1) {
		pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
			 adj->ref_nr-1);
		adj->ref_nr--;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
					   master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
					   false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
		return ret;
	}

	return 0;
}

static int __netdev_adjacent_dev_link(struct net_device *dev,
				      struct net_device *upper_dev)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->all_adj_list.upper,
						&upper_dev->all_adj_list.lower,
						NULL, false);
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
}

static void __netdev_adjacent_dev_unlink(struct net_device *dev,
					 struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
					   &dev->all_adj_list.upper,
					   &upper_dev->all_adj_list.lower);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	int ret = __netdev_adjacent_dev_link(dev, upper_dev);

	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
					       &dev->adj_list.upper,
					       &upper_dev->adj_list.lower,
					       private, master);
	if (ret) {
		__netdev_adjacent_dev_unlink(dev, upper_dev);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink(dev, upper_dev);
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info)
{
	struct netdev_notifier_changeupper_info changeupper_info;
	struct netdev_adjacent *i, *j, *to_i, *to_j;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (__netdev_find_adj(dev, &upper_dev->all_adj_list.upper))
		return -EBUSY;

	if (__netdev_find_adj(upper_dev, &dev->adj_list.upper))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = master;
	changeupper_info.linking = true;
	changeupper_info.upper_info = upper_info;

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	/* Now that we linked these devs, make all the upper_dev's
	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
	 * vice versa, and don't forget the devices themselves. All of these
	 * links are non-neighbours.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			pr_debug("Interlinking %s with %s, non-neighbour\n",
				 i->dev->name, j->dev->name);
			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
			if (ret)
				goto rollback_mesh;
		}
	}

	/* add dev to every upper_dev's upper device */
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		pr_debug("linking %s's upper device %s with %s\n",
			 upper_dev->name, i->dev->name, dev->name);
		ret = __netdev_adjacent_dev_link(dev, i->dev);
		if (ret)
			goto rollback_upper_mesh;
	}

	/* add upper_dev to every dev's lower device */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		pr_debug("linking %s's lower device %s with %s\n", dev->name,
			 i->dev->name, upper_dev->name);
		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
		if (ret)
			goto rollback_lower_mesh;
	}

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback_lower_mesh;

	return 0;

rollback_lower_mesh:
	to_i = i;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
	}

	i = NULL;

rollback_upper_mesh:
	to_i = i;
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(dev, i->dev);
	}

	i = j = NULL;

rollback_mesh:
	to_i = i;
	to_j = j;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			if (i == to_i && j == to_j)
				break;
			__netdev_adjacent_dev_unlink(i->dev, j->dev);
		}
		if (i == to_i)
			break;
	}

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info)
{
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
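
/* Illustrative sketch, not part of the original file: how a bonding-style
 * master might attach a slave, passing per-slave private data that later
 * comes back via netdev_lower_dev_get_private(). Note that the slave is
 * the first argument and the master is its new *upper* device. The
 * example_* names and the slave_priv argument are hypothetical.
 */
static __maybe_unused int example_enslave(struct net_device *master_dev,
					  struct net_device *slave_dev,
					  void *slave_priv)
{
	ASSERT_RTNL();

	return netdev_master_upper_dev_link(slave_dev, master_dev,
					    slave_priv, NULL);
}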

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info;
	struct netdev_adjacent *i, *j;

	ASSERT_RTNL();

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
	changeupper_info.linking = false;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	/* Here is the tricky part. We must remove all dev's lower
	 * devices from all upper_dev's upper devices and vice
	 * versa, to maintain the graph relationship.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
			__netdev_adjacent_dev_unlink(i->dev, j->dev);

	/* also remove the devices themselves from the lower/upper device
	 * lists
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);

	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
		__netdev_adjacent_dev_unlink(dev, i->dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
				      &changeupper_info.info);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
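
/* Illustrative sketch, not part of the original file: the release path
 * matching the enslave sketch above; the same (dev, upper_dev) argument
 * ordering applies. The example_* names are hypothetical.
 */
static __maybe_unused void example_release_slave(struct net_device *master_dev,
						 struct net_device *slave_dev)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(slave_dev, master_dev);
}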

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info;

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);

static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);


int dev_get_nest_level(struct net_device *dev,
		       bool (*type_check)(const struct net_device *dev))
{
	struct net_device *lower = NULL;
	struct list_head *iter;
	int max_nest = -1;
	int nest;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower, type_check);
		if (max_nest < nest)
			max_nest = nest;
	}

	if (type_check(dev))
		max_nest++;

	return max_nest;
}
EXPORT_SYMBOL(dev_get_nest_level);
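
/* Illustrative sketch, not part of the original file: computing how many
 * VLAN devices are stacked below (and including) @dev, in the style of
 * the is_vlan_dev() users of dev_get_nest_level(). The example_* names
 * are hypothetical stand-ins for a real type check.
 */
static __maybe_unused bool example_is_vlan(const struct net_device *dev)
{
	return !!(dev->priv_flags & IFF_802_1Q_VLAN);
}

static __maybe_unused int example_vlan_nest_level(struct net_device *dev)
{
	ASSERT_RTNL();

	return dev_get_nest_level(dev, example_is_vlan);
}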

/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{
	struct netdev_notifier_changelowerstate_info changelowerstate_info;

	ASSERT_RTNL();
	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
				      &changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
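
/* Illustrative sketch, not part of the original file: a tap-like feature
 * takes one promiscuity reference while active and must drop exactly one
 * when done, since the count is a reference counter, not a flag. The
 * example_* names are hypothetical.
 */
static __maybe_unused int example_tap_enable(struct net_device *dev)
{
	ASSERT_RTNL();

	return dev_set_promiscuity(dev, 1);
}

static __maybe_unused void example_tap_disable(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_set_promiscuity(dev, -1);
}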

static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
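
/* Illustrative sketch, not part of the original file: a multicast-routing
 * style user holds an allmulti reference while active, mirroring the
 * promiscuity example above. The example_* names are hypothetical.
 */
static __maybe_unused int example_mroute_start(struct net_device *dev)
{
	ASSERT_RTNL();

	return dev_set_allmulti(dev, 1);
}

static __maybe_unused void example_mroute_stop(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_set_allmulti(dev, -1);
}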

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);

int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 * Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 * Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface? We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP)
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = changes;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
	}
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 *
 * Change settings on a device based on state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
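
/* Illustrative sketch, not part of the original file: bringing an
 * interface administratively up by setting IFF_UP, the same path the
 * SIOCSIFFLAGS ioctl takes. The example_* name is hypothetical.
 */
static __maybe_unused int example_bring_up(struct net_device *dev)
{
	ASSERT_RTNL();

	return dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
}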

static int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}

/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		}
	}
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
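
/* Illustrative sketch, not part of the original file: a tunnel-like upper
 * device sizing its MTU from its lower device. If a NETDEV_CHANGEMTU
 * listener objects, dev_set_mtu() has already restored the old MTU, so
 * the error can simply be propagated. EXAMPLE_HDR_LEN and the example_*
 * name are hypothetical.
 */
#define EXAMPLE_HDR_LEN 8

static __maybe_unused int example_set_tunnel_mtu(struct net_device *dev,
						 struct net_device *lower)
{
	ASSERT_RTNL();

	return dev_set_mtu(dev, lower->mtu - EXAMPLE_HDR_LEN);
}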

/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
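
/* Illustrative sketch, not part of the original file: programming a MAC
 * address through dev_set_mac_address() so that NETDEV_CHANGEADDR
 * listeners run. The address is an arbitrary locally administered
 * example and the example_* name is hypothetical.
 */
static __maybe_unused int example_set_mac(struct net_device *dev)
{
	struct sockaddr sa;

	ASSERT_RTNL();
	sa.sa_family = dev->type;
	memcpy(sa.sa_data, "\x02\x00\x00\x00\x00\x01", ETH_ALEN);

	return dev_set_mac_address(dev, &sa);
}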

/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 * dev_get_phys_port_name - Get device physical port name
 * @dev: device
 * @name: port name
 * @len: limit of bytes to copy to name
 *
 * Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_name)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_name(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);

/**
 * dev_change_proto_down - update protocol port state information
 * @dev: device
 * @proto_down: new value
 *
 * This info can be used by switch drivers to set the phys state of the
 * port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);

/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}
6290
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006291static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006292{
Krishna Kumare93737b2009-12-08 22:26:02 +00006293 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006294 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006295
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006296 BUG_ON(dev_boot_phase);
6297 ASSERT_RTNL();
6298
Krishna Kumare93737b2009-12-08 22:26:02 +00006299 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006300 /* Some devices call without registering
Krishna Kumare93737b2009-12-08 22:26:02 +00006301 * for initialization unwind. Remove those
6302 * devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006303 */
6304 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006305 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6306 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006307
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006308 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00006309 list_del(&dev->unreg_list);
6310 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006311 }
Eric Dumazet449f4542011-05-19 12:24:16 +00006312 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006313 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00006314 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006315
Octavian Purdila44345722010-12-13 12:44:07 +00006316 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006317 list_for_each_entry(dev, head, unreg_list)
6318 list_add_tail(&dev->close_list, &close_head);
David S. Miller99c4a262015-03-18 22:52:33 -04006319 dev_close_many(&close_head, true);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006320
Octavian Purdila44345722010-12-13 12:44:07 +00006321 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006322 /* And unlink it from device chain. */
6323 unlist_netdevice(dev);
6324
6325 dev->reg_state = NETREG_UNREGISTERING;
Julian Anastasove9e4dd32015-07-09 09:59:09 +03006326 on_each_cpu(flush_backlog, dev, 1);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006327 }
6328
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006329 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006330
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006331 list_for_each_entry(dev, head, unreg_list) {
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006332 struct sk_buff *skb = NULL;
6333
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006334 /* Shutdown queueing discipline. */
6335 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006336
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006337
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006338 /* Notify protocols, that we are about to destroy
6339 this device. They should clean all the things.
6340 */
6341 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6342
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006343 if (!dev->rtnl_link_ops ||
6344 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6345 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6346 GFP_KERNEL);
6347
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006348 /*
6349 * Flush the unicast and multicast chains
6350 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006351 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006352 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006353
6354 if (dev->netdev_ops->ndo_uninit)
6355 dev->netdev_ops->ndo_uninit(dev);
6356
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006357 if (skb)
6358 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
Roopa Prabhu56bfa7e2014-05-01 11:40:30 -07006359
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006360		/* Notifier chain MUST detach us from all upper devices. */
6361 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006362
6363 /* Remove entries from kobject tree */
6364 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00006365#ifdef CONFIG_XPS
6366 /* Remove XPS queueing entries */
6367 netif_reset_xps_queues_gt(dev, 0);
6368#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006369 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006370
Eric W. Biederman850a5452011-10-13 22:25:23 +00006371 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006372
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00006373 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006374 dev_put(dev);
6375}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006376
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006377static void rollback_registered(struct net_device *dev)
6378{
6379 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006380
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006381 list_add(&dev->unreg_list, &single);
6382 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00006383 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006384}
6385
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006386static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
6387 struct net_device *upper, netdev_features_t features)
6388{
6389 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6390 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05006391 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006392
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05006393 for_each_netdev_feature(&upper_disables, feature_bit) {
6394 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006395 if (!(upper->wanted_features & feature)
6396 && (features & feature)) {
6397 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
6398 &feature, upper->name);
6399 features &= ~feature;
6400 }
6401 }
6402
6403 return features;
6404}
6405
6406static void netdev_sync_lower_features(struct net_device *upper,
6407 struct net_device *lower, netdev_features_t features)
6408{
6409 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6410 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05006411 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006412
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05006413 for_each_netdev_feature(&upper_disables, feature_bit) {
6414 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006415 if (!(features & feature) && (lower->features & feature)) {
6416 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
6417 &feature, lower->name);
6418 lower->wanted_features &= ~feature;
6419 netdev_update_features(lower);
6420
6421 if (unlikely(lower->features & feature))
6422 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
6423 &feature, lower->name);
6424 }
6425 }
6426}
6427
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006428static netdev_features_t netdev_fix_features(struct net_device *dev,
6429 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07006430{
Michał Mirosław57422dc2011-01-22 12:14:12 +00006431 /* Fix illegal checksum combinations */
6432 if ((features & NETIF_F_HW_CSUM) &&
6433 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006434 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00006435 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6436 }
6437
Herbert Xub63365a2008-10-23 01:11:29 -07006438 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00006439 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006440 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00006441 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07006442 }
6443
Pravin B Shelarec5f0612013-03-07 09:28:01 +00006444 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6445 !(features & NETIF_F_IP_CSUM)) {
6446 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6447 features &= ~NETIF_F_TSO;
6448 features &= ~NETIF_F_TSO_ECN;
6449 }
6450
6451 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6452 !(features & NETIF_F_IPV6_CSUM)) {
6453 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6454 features &= ~NETIF_F_TSO6;
6455 }
6456
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00006457 /* TSO ECN requires that TSO is present as well. */
6458 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6459 features &= ~NETIF_F_TSO_ECN;
6460
Michał Mirosław212b5732011-02-15 16:59:16 +00006461 /* Software GSO depends on SG. */
6462 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006463 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00006464 features &= ~NETIF_F_GSO;
6465 }
6466
Michał Mirosławacd11302011-01-24 15:45:15 -08006467 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07006468 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00006469 /* maybe split UFO into V4 and V6? */
6470 if (!((features & NETIF_F_GEN_CSUM) ||
6471 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
6472 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006473 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08006474 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07006475 features &= ~NETIF_F_UFO;
6476 }
6477
6478 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006479 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08006480 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07006481 features &= ~NETIF_F_UFO;
6482 }
6483 }
6484
Jiri Pirkod0290212014-04-02 23:09:31 +02006485#ifdef CONFIG_NET_RX_BUSY_POLL
6486 if (dev->netdev_ops->ndo_busy_poll)
6487 features |= NETIF_F_BUSY_POLL;
6488 else
6489#endif
6490 features &= ~NETIF_F_BUSY_POLL;
6491
Herbert Xub63365a2008-10-23 01:11:29 -07006492 return features;
6493}
Herbert Xub63365a2008-10-23 01:11:29 -07006494
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006495int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00006496{
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006497 struct net_device *upper, *lower;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006498 netdev_features_t features;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006499 struct list_head *iter;
Jarod Wilsone7868a82015-11-03 23:09:32 -05006500 int err = -1;
Michał Mirosław5455c692011-02-15 16:59:17 +00006501
Michał Mirosław87267482011-04-12 09:56:38 +00006502 ASSERT_RTNL();
6503
Michał Mirosław5455c692011-02-15 16:59:17 +00006504 features = netdev_get_wanted_features(dev);
6505
6506 if (dev->netdev_ops->ndo_fix_features)
6507 features = dev->netdev_ops->ndo_fix_features(dev, features);
6508
6509 /* driver might be less strict about feature dependencies */
6510 features = netdev_fix_features(dev, features);
6511
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006512	/* some features can't be enabled if they're off on an upper device */
6513 netdev_for_each_upper_dev_rcu(dev, upper, iter)
6514 features = netdev_sync_upper_features(dev, upper, features);
6515
Michał Mirosław5455c692011-02-15 16:59:17 +00006516 if (dev->features == features)
Jarod Wilsone7868a82015-11-03 23:09:32 -05006517 goto sync_lower;
Michał Mirosław5455c692011-02-15 16:59:17 +00006518
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006519 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6520 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00006521
6522 if (dev->netdev_ops->ndo_set_features)
6523 err = dev->netdev_ops->ndo_set_features(dev, features);
Nikolay Aleksandrov5f8dc332015-11-13 14:54:01 +01006524 else
6525 err = 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00006526
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006527 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00006528 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006529 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6530 err, &features, &dev->features);
Nikolay Aleksandrov17b85d22015-11-17 15:49:06 +01006531 /* return non-0 since some features might have changed and
6532 * it's better to fire a spurious notification than miss it
6533 */
6534 return -1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006535 }
6536
Jarod Wilsone7868a82015-11-03 23:09:32 -05006537sync_lower:
Jarod Wilsonfd867d52015-11-02 21:55:59 -05006538 /* some features must be disabled on lower devices when disabled
6539 * on an upper device (think: bonding master or bridge)
6540 */
6541 netdev_for_each_lower_dev(dev, lower, iter)
6542 netdev_sync_lower_features(dev, lower, features);
6543
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006544 if (!err)
6545 dev->features = features;
6546
Jarod Wilsone7868a82015-11-03 23:09:32 -05006547 return err < 0 ? 0 : 1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006548}
6549
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006550/**
6551 * netdev_update_features - recalculate device features
6552 * @dev: the device to check
6553 *
6554 * Recalculate dev->features set and send notifications if it
6555 * has changed. Should be called after driver or hardware dependent
6556 * conditions might have changed that influence the features.
6557 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006558void netdev_update_features(struct net_device *dev)
6559{
6560 if (__netdev_update_features(dev))
6561 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00006562}
6563EXPORT_SYMBOL(netdev_update_features);
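/* Illustrative sketch, not part of dev.c: a driver whose offload
 * capabilities change at runtime (say, after a firmware reload) would
 * update dev->hw_features and let the core recompute dev->features.
 * The example_set_rx_csum() name and the RXCSUM choice are assumptions.
 */
static void example_set_rx_csum(struct net_device *dev, bool on)
{
	rtnl_lock();
	if (on)
		dev->hw_features |= NETIF_F_RXCSUM;
	else
		dev->hw_features &= ~NETIF_F_RXCSUM;
	/* recomputes dev->features and notifies only if they changed */
	netdev_update_features(dev);
	rtnl_unlock();
}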
6564
Linus Torvalds1da177e2005-04-16 15:20:36 -07006565/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006566 * netdev_change_features - recalculate device features
6567 * @dev: the device to check
6568 *
6569 * Recalculate dev->features set and send notifications even
6570 * if they have not changed. Should be called instead of
6571 * netdev_update_features() if also dev->vlan_features might
6572 * have changed to allow the changes to be propagated to stacked
6573 * VLAN devices.
6574 */
6575void netdev_change_features(struct net_device *dev)
6576{
6577 __netdev_update_features(dev);
6578 netdev_features_change(dev);
6579}
6580EXPORT_SYMBOL(netdev_change_features);
6581
6582/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006583 * netif_stacked_transfer_operstate - transfer operstate
6584 * @rootdev: the root or lower level device to transfer state from
6585 * @dev: the device to transfer operstate to
6586 *
6587 * Transfer operational state from root to device. This is normally
6588 * called when a stacking relationship exists between the root
6589 * device and the device (a leaf device).
6590 */
6591void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6592 struct net_device *dev)
6593{
6594 if (rootdev->operstate == IF_OPER_DORMANT)
6595 netif_dormant_on(dev);
6596 else
6597 netif_dormant_off(dev);
6598
6599 if (netif_carrier_ok(rootdev)) {
6600 if (!netif_carrier_ok(dev))
6601 netif_carrier_on(dev);
6602 } else {
6603 if (netif_carrier_ok(dev))
6604 netif_carrier_off(dev);
6605 }
6606}
6607EXPORT_SYMBOL(netif_stacked_transfer_operstate);
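/* Sketch of a typical caller, assuming a stacked driver (VLAN/macvlan
 * style) reacting to a NETDEV_CHANGE event on its lower device. The
 * example_* name and how @stacked was looked up are hypothetical.
 */
static int example_lower_changed(struct net_device *lower,
				 struct net_device *stacked,
				 unsigned long event)
{
	if (event == NETDEV_CHANGE)
		netif_stacked_transfer_operstate(lower, stacked);
	return NOTIFY_DONE;
}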
6608
Michael Daltona953be52014-01-16 22:23:28 -08006609#ifdef CONFIG_SYSFS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006610static int netif_alloc_rx_queues(struct net_device *dev)
6611{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006612 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00006613 struct netdev_rx_queue *rx;
Pankaj Gupta10595902015-01-12 11:41:28 +05306614 size_t sz = count * sizeof(*rx);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006615
Tom Herbertbd25fa72010-10-18 18:00:16 +00006616 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006617
Pankaj Gupta10595902015-01-12 11:41:28 +05306618 rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6619 if (!rx) {
6620 rx = vzalloc(sz);
6621 if (!rx)
6622 return -ENOMEM;
6623 }
Tom Herbertbd25fa72010-10-18 18:00:16 +00006624 dev->_rx = rx;
6625
Tom Herbertbd25fa72010-10-18 18:00:16 +00006626 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00006627 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006628 return 0;
6629}
Tom Herbertbf264142010-11-26 08:36:09 +00006630#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006631
Changli Gaoaa942102010-12-04 02:31:41 +00006632static void netdev_init_one_queue(struct net_device *dev,
6633 struct netdev_queue *queue, void *_unused)
6634{
6635 /* Initialize queue lock */
6636 spin_lock_init(&queue->_xmit_lock);
6637 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6638 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00006639 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00006640 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00006641#ifdef CONFIG_BQL
6642 dql_init(&queue->dql, HZ);
6643#endif
Changli Gaoaa942102010-12-04 02:31:41 +00006644}
6645
Eric Dumazet60877a32013-06-20 01:15:51 -07006646static void netif_free_tx_queues(struct net_device *dev)
6647{
WANG Cong4cb28972014-06-02 15:55:22 -07006648 kvfree(dev->_tx);
Eric Dumazet60877a32013-06-20 01:15:51 -07006649}
6650
Tom Herberte6484932010-10-18 18:04:39 +00006651static int netif_alloc_netdev_queues(struct net_device *dev)
6652{
6653 unsigned int count = dev->num_tx_queues;
6654 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07006655 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00006656
Eric Dumazetd3397272015-07-06 17:13:26 +02006657 if (count < 1 || count > 0xffff)
6658 return -EINVAL;
Tom Herberte6484932010-10-18 18:04:39 +00006659
Eric Dumazet60877a32013-06-20 01:15:51 -07006660 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6661 if (!tx) {
6662 tx = vzalloc(sz);
6663 if (!tx)
6664 return -ENOMEM;
6665 }
Tom Herberte6484932010-10-18 18:04:39 +00006666 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00006667
Tom Herberte6484932010-10-18 18:04:39 +00006668 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6669 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00006670
6671 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00006672}
6673
Denys Vlasenkoa2029242015-05-11 21:17:53 +02006674void netif_tx_stop_all_queues(struct net_device *dev)
6675{
6676 unsigned int i;
6677
6678 for (i = 0; i < dev->num_tx_queues; i++) {
6679 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
6680 netif_tx_stop_queue(txq);
6681 }
6682}
6683EXPORT_SYMBOL(netif_tx_stop_all_queues);
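/* Sketch, assuming an ordinary driver: .ndo_stop quiesces TX first so
 * no new skbs reach ndo_start_xmit while the hardware is torn down.
 * example_ndo_stop() is a placeholder name.
 */
static int example_ndo_stop(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	/* ... then disable interrupts, drain and free the TX rings ... */
	return 0;
}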
6684
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006685/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006686 * register_netdevice - register a network device
6687 * @dev: device to register
6688 *
6689 * Take a completed network device structure and add it to the kernel
6690 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6691 * chain. 0 is returned on success. A negative errno code is returned
6692 * on a failure to set up the device, or if the name is a duplicate.
6693 *
6694 * Callers must hold the rtnl semaphore. You may want
6695 * register_netdev() instead of this.
6696 *
6697 * BUGS:
6698 * The locking appears insufficient to guarantee two parallel registers
6699 * will not get the same name.
6700 */
6701
6702int register_netdevice(struct net_device *dev)
6703{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006704 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006705 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006706
6707 BUG_ON(dev_boot_phase);
6708 ASSERT_RTNL();
6709
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006710 might_sleep();
6711
Linus Torvalds1da177e2005-04-16 15:20:36 -07006712	/* When net_devices are persistent, this will be fatal. */
6713 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006714 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006715
David S. Millerf1f28aa2008-07-15 00:08:33 -07006716 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07006717 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006718
Gao feng828de4f2012-09-13 20:58:27 +00006719 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00006720 if (ret < 0)
6721 goto out;
6722
Linus Torvalds1da177e2005-04-16 15:20:36 -07006723 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006724 if (dev->netdev_ops->ndo_init) {
6725 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006726 if (ret) {
6727 if (ret > 0)
6728 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08006729 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006730 }
6731 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006732
Patrick McHardyf6469682013-04-19 02:04:27 +00006733 if (((dev->hw_features | dev->features) &
6734 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00006735 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6736 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6737 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6738 ret = -EINVAL;
6739 goto err_uninit;
6740 }
6741
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00006742 ret = -EBUSY;
6743 if (!dev->ifindex)
6744 dev->ifindex = dev_new_index(net);
6745 else if (__dev_get_by_index(net, dev->ifindex))
6746 goto err_uninit;
6747
Michał Mirosław5455c692011-02-15 16:59:17 +00006748 /* Transfer changeable features to wanted_features and enable
6749 * software offloads (GSO and GRO).
6750 */
6751 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00006752 dev->features |= NETIF_F_SOFT_FEATURES;
6753 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006754
Michał Mirosław34324dc2011-11-15 15:29:55 +00006755	if (!(dev->flags & IFF_LOOPBACK))
6756		dev->hw_features |= NETIF_F_NOCACHE_COPY;
6758
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006759 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00006760 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006761 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00006762
Pravin B Shelaree579672013-03-07 09:28:08 +00006763 /* Make NETIF_F_SG inheritable to tunnel devices.
6764 */
6765 dev->hw_enc_features |= NETIF_F_SG;
6766
Simon Horman0d89d202013-05-23 21:02:52 +00006767 /* Make NETIF_F_SG inheritable to MPLS.
6768 */
6769 dev->mpls_features |= NETIF_F_SG;
6770
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00006771 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6772 ret = notifier_to_errno(ret);
6773 if (ret)
6774 goto err_uninit;
6775
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006776 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006777 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006778 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006779 dev->reg_state = NETREG_REGISTERED;
6780
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006781 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00006782
Linus Torvalds1da177e2005-04-16 15:20:36 -07006783 /*
6784 * Default initial state at registry is that the
6785 * device is present.
6786 */
6787
6788 set_bit(__LINK_STATE_PRESENT, &dev->state);
6789
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01006790 linkwatch_init_dev(dev);
6791
Linus Torvalds1da177e2005-04-16 15:20:36 -07006792 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006793 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006794 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006795 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006796
Jiri Pirko948b3372013-01-08 01:38:25 +00006797	/* If the device has a permanent hardware address, the driver
6798	 * should set dev_addr, and addr_assign_type should keep its
6799	 * NET_ADDR_PERM default value.
6800 */
6801 if (dev->addr_assign_type == NET_ADDR_PERM)
6802 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6803
Linus Torvalds1da177e2005-04-16 15:20:36 -07006804 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006805 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07006806 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006807 if (ret) {
6808 rollback_registered(dev);
6809 dev->reg_state = NETREG_UNREGISTERED;
6810 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006811 /*
6812 * Prevent userspace races by waiting until the network
6813	 * device is fully set up before sending notifications.
6814 */
Patrick McHardya2835762010-02-26 06:34:51 +00006815 if (!dev->rtnl_link_ops ||
6816 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006817 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006818
6819out:
6820 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006821
6822err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006823 if (dev->netdev_ops->ndo_uninit)
6824 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006825 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006826}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006827EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006828
6829/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006830 * init_dummy_netdev - init a dummy network device for NAPI
6831 * @dev: device to init
6832 *
6833 * This takes a network device structure and initializes the minimum
6834 * set of fields so it can be used to schedule NAPI polls without
6835 * registering a full-blown interface. This is to be used by drivers
6836 * that need to tie several hardware interfaces to a single NAPI
6837 * poll scheduler due to HW limitations.
6838 */
6839int init_dummy_netdev(struct net_device *dev)
6840{
6841 /* Clear everything. Note we don't initialize spinlocks
6842	 * as they aren't supposed to be taken by any of the
6843 * NAPI code and this dummy netdev is supposed to be
6844 * only ever used for NAPI polls
6845 */
6846 memset(dev, 0, sizeof(struct net_device));
6847
6848 /* make sure we BUG if trying to hit standard
6849 * register/unregister code path
6850 */
6851 dev->reg_state = NETREG_DUMMY;
6852
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006853 /* NAPI wants this */
6854 INIT_LIST_HEAD(&dev->napi_list);
6855
6856 /* a dummy interface is started by default */
6857 set_bit(__LINK_STATE_PRESENT, &dev->state);
6858 set_bit(__LINK_STATE_START, &dev->state);
6859
Eric Dumazet29b44332010-10-11 10:22:12 +00006860	/* Note: We don't allocate pcpu_refcnt for dummy devices,
6861	 * because users of this 'device' don't need to change
6862 * its refcount.
6863 */
6864
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006865 return 0;
6866}
6867EXPORT_SYMBOL_GPL(init_dummy_netdev);
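/* Sketch of the intended use, with a hypothetical driver private struct:
 * several hardware queues share one NAPI scheduler hosted on an embedded
 * dummy netdev, as some wireless drivers do.
 */
struct example_priv {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static void example_napi_setup(struct example_priv *priv,
			       int (*poll)(struct napi_struct *, int))
{
	init_dummy_netdev(&priv->napi_dev);
	netif_napi_add(&priv->napi_dev, &priv->napi, poll, 64);
	napi_enable(&priv->napi);
}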
6868
6869
6870/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006871 * register_netdev - register a network device
6872 * @dev: device to register
6873 *
6874 * Take a completed network device structure and add it to the kernel
6875 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6876 * chain. 0 is returned on success. A negative errno code is returned
6877 * on a failure to set up the device, or if the name is a duplicate.
6878 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07006879 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07006880 * and expands the device name if you passed a format string to
6881 * alloc_netdev.
6882 */
6883int register_netdev(struct net_device *dev)
6884{
6885 int err;
6886
6887 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006888 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006889 rtnl_unlock();
6890 return err;
6891}
6892EXPORT_SYMBOL(register_netdev);
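/* Minimal lifecycle sketch, assuming an Ethernet-style device; the
 * example_* names are placeholders. Note register_netdev() takes the
 * rtnl lock itself, so it must be called without it held.
 */
static struct net_device *example_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev(0, "example%d", NET_NAME_UNKNOWN, ether_setup);
	if (!dev)
		return NULL;
	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}

static void example_destroy(struct net_device *dev)
{
	unregister_netdev(dev);		/* waits for all references to drop */
	free_netdev(dev);
}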
6893
Eric Dumazet29b44332010-10-11 10:22:12 +00006894int netdev_refcnt_read(const struct net_device *dev)
6895{
6896 int i, refcnt = 0;
6897
6898 for_each_possible_cpu(i)
6899 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6900 return refcnt;
6901}
6902EXPORT_SYMBOL(netdev_refcnt_read);
6903
Ben Hutchings2c530402012-07-10 10:55:09 +00006904/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006905 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00006906 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006907 *
6908 * This is called when unregistering network devices.
6909 *
6910 * Any protocol or device that holds a reference should register
6911 * for netdevice notification, and clean up and put back the
6912 * reference if they receive an UNREGISTER event.
6913 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006914 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006915 */
6916static void netdev_wait_allrefs(struct net_device *dev)
6917{
6918 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00006919 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006920
Eric Dumazete014deb2009-11-17 05:59:21 +00006921 linkwatch_forget_dev(dev);
6922
Linus Torvalds1da177e2005-04-16 15:20:36 -07006923 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00006924 refcnt = netdev_refcnt_read(dev);
6925
6926 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006927 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006928 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006929
6930 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006931 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006932
Eric Dumazet748e2d92012-08-22 21:50:59 +00006933 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006934 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00006935 rtnl_lock();
6936
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006937 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006938 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6939 &dev->state)) {
6940 /* We must not have linkwatch events
6941 * pending on unregister. If this
6942 * happens, we simply run the queue
6943 * unscheduled, resulting in a noop
6944 * for this device.
6945 */
6946 linkwatch_run_queue();
6947 }
6948
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006949 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006950
6951 rebroadcast_time = jiffies;
6952 }
6953
6954 msleep(250);
6955
Eric Dumazet29b44332010-10-11 10:22:12 +00006956 refcnt = netdev_refcnt_read(dev);
6957
Linus Torvalds1da177e2005-04-16 15:20:36 -07006958 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006959 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6960 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006961 warning_time = jiffies;
6962 }
6963 }
6964}
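/* Sketch of the protocol described above, with a hypothetical single-slot
 * cache: a subsystem holding a dev_hold() reference registers a netdev
 * notifier and drops the reference on NETDEV_UNREGISTER, letting
 * netdev_wait_allrefs() complete.
 */
static struct net_device *example_cached_dev;

static int example_ref_notifier(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UNREGISTER && example_cached_dev == dev) {
		example_cached_dev = NULL;
		dev_put(dev);
	}
	return NOTIFY_DONE;
}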
6965
6966/* The sequence is:
6967 *
6968 * rtnl_lock();
6969 * ...
6970 * register_netdevice(x1);
6971 * register_netdevice(x2);
6972 * ...
6973 * unregister_netdevice(y1);
6974 * unregister_netdevice(y2);
6975 * ...
6976 * rtnl_unlock();
6977 * free_netdev(y1);
6978 * free_netdev(y2);
6979 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006980 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006981 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006982 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006983 * without deadlocking with linkwatch via keventd.
6984 * 2) Since we run with the RTNL semaphore not held, we can sleep
6985 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006986 *
6987 * We must not return until all unregister events added during
6988 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006989 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006990void netdev_run_todo(void)
6991{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006992 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006993
Linus Torvalds1da177e2005-04-16 15:20:36 -07006994 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006995 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07006996
6997 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006998
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006999
7000 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00007001 if (!list_empty(&list))
7002 rcu_barrier();
7003
Linus Torvalds1da177e2005-04-16 15:20:36 -07007004 while (!list_empty(&list)) {
7005 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00007006 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007007 list_del(&dev->todo_list);
7008
Eric Dumazet748e2d92012-08-22 21:50:59 +00007009 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007010 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00007011 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00007012
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007013 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007014 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007015 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007016 dump_stack();
7017 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007018 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007019
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007020 dev->reg_state = NETREG_UNREGISTERED;
7021
7022 netdev_wait_allrefs(dev);
7023
7024 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00007025 BUG_ON(netdev_refcnt_read(dev));
Salam Noureddine7866a622015-01-27 11:35:48 -08007026 BUG_ON(!list_empty(&dev->ptype_all));
7027 BUG_ON(!list_empty(&dev->ptype_specific));
Eric Dumazet33d480c2011-08-11 19:30:52 +00007028 WARN_ON(rcu_access_pointer(dev->ip_ptr));
7029 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07007030 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007031
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07007032 if (dev->destructor)
7033 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007034
Eric W. Biederman50624c92013-09-23 21:19:49 -07007035 /* Report a network device has been unregistered */
7036 rtnl_lock();
7037 dev_net(dev)->dev_unreg_count--;
7038 __rtnl_unlock();
7039 wake_up(&netdev_unregistering_wq);
7040
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07007041 /* Free network device */
7042 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007043 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007044}
7045
Ben Hutchings3cfde792010-07-09 09:11:52 +00007046/* Convert net_device_stats to rtnl_link_stats64. They have the same
7047 * fields in the same order, with only the type differing.
7048 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007049void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7050 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00007051{
7052#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007053 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
7054 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00007055#else
7056 size_t i, n = sizeof(*stats64) / sizeof(u64);
7057 const unsigned long *src = (const unsigned long *)netdev_stats;
7058 u64 *dst = (u64 *)stats64;
7059
7060 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
7061 sizeof(*stats64) / sizeof(u64));
7062 for (i = 0; i < n; i++)
7063 dst[i] = src[i];
7064#endif
7065}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00007066EXPORT_SYMBOL(netdev_stats_to_stats64);
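/* Sketch, not in dev.c: a driver that only maintains the legacy
 * dev->stats counters can build its .ndo_get_stats64 on this helper;
 * the example_* name is a placeholder.
 */
static struct rtnl_link_stats64 *example_get_stats64(struct net_device *dev,
						     struct rtnl_link_stats64 *s)
{
	netdev_stats_to_stats64(s, &dev->stats);
	return s;
}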
Ben Hutchings3cfde792010-07-09 09:11:52 +00007067
Eric Dumazetd83345a2009-11-16 03:36:51 +00007068/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007069 * dev_get_stats - get network device statistics
7070 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07007071 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007072 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00007073 * Get network statistics from device. Return @storage.
7074 * The device driver may provide its own method by setting
7075 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
7076 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007077 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00007078struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7079 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00007080{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007081 const struct net_device_ops *ops = dev->netdev_ops;
7082
Eric Dumazet28172732010-07-07 14:58:56 -07007083 if (ops->ndo_get_stats64) {
7084 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007085 ops->ndo_get_stats64(dev, storage);
7086 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00007087 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007088 } else {
7089 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07007090 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00007091 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet015f0682014-03-27 08:45:56 -07007092 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07007093 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07007094}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08007095EXPORT_SYMBOL(dev_get_stats);
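/* Reader-side sketch: snapshot one counter. dev_get_stats() fills and
 * returns @storage, so the call can be chained; the caller is assumed
 * to hold a reference (or RTNL/RCU) keeping @dev alive.
 */
static u64 example_rx_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 storage;

	return dev_get_stats(dev, &storage)->rx_packets;
}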
Rusty Russellc45d2862007-03-28 14:29:08 -07007096
Eric Dumazet24824a02010-10-02 06:11:55 +00007097struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07007098{
Eric Dumazet24824a02010-10-02 06:11:55 +00007099 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07007100
Eric Dumazet24824a02010-10-02 06:11:55 +00007101#ifdef CONFIG_NET_CLS_ACT
7102 if (queue)
7103 return queue;
7104 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7105 if (!queue)
7106 return NULL;
7107 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08007108 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
Eric Dumazet24824a02010-10-02 06:11:55 +00007109 queue->qdisc_sleeping = &noop_qdisc;
7110 rcu_assign_pointer(dev->ingress_queue, queue);
7111#endif
7112 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07007113}
7114
Eric Dumazet2c60db02012-09-16 09:17:26 +00007115static const struct ethtool_ops default_ethtool_ops;
7116
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00007117void netdev_set_default_ethtool_ops(struct net_device *dev,
7118 const struct ethtool_ops *ops)
7119{
7120 if (dev->ethtool_ops == &default_ethtool_ops)
7121 dev->ethtool_ops = ops;
7122}
7123EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
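/* Sketch, assuming a shared MAC-core library: it can install fallback
 * ethtool ops on devices built on top of it without clobbering ops a
 * driver already set. Both example_* names are hypothetical.
 */
static const struct ethtool_ops example_core_ethtool_ops = {
	/* common get_link/get_settings helpers would go here */
};

static void example_core_attach(struct net_device *dev)
{
	netdev_set_default_ethtool_ops(dev, &example_core_ethtool_ops);
}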
7124
Eric Dumazet74d332c2013-10-30 13:10:44 -07007125void netdev_freemem(struct net_device *dev)
7126{
7127 char *addr = (char *)dev - dev->padded;
7128
WANG Cong4cb28972014-06-02 15:55:22 -07007129 kvfree(addr);
Eric Dumazet74d332c2013-10-30 13:10:44 -07007130}
7131
Linus Torvalds1da177e2005-04-16 15:20:36 -07007132/**
Tom Herbert36909ea2011-01-09 19:36:31 +00007133 * alloc_netdev_mqs - allocate network device
Tom Gundersenc835a672014-07-14 16:37:24 +02007134 * @sizeof_priv: size of private data to allocate space for
7135 * @name: device name format string
7136 * @name_assign_type: origin of device name
7137 * @setup: callback to initialize device
7138 * @txqs: the number of TX subqueues to allocate
7139 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07007140 *
7141 * Allocates a struct net_device with private data area for driver use
Li Zhong90e51ad2013-11-22 15:04:46 +08007142 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00007143 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007144 */
Tom Herbert36909ea2011-01-09 19:36:31 +00007145struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02007146 unsigned char name_assign_type,
Tom Herbert36909ea2011-01-09 19:36:31 +00007147 void (*setup)(struct net_device *),
7148 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007149{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007150 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07007151 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007152 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007153
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07007154 BUG_ON(strlen(name) >= sizeof(dev->name));
7155
Tom Herbert36909ea2011-01-09 19:36:31 +00007156 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007157 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00007158 return NULL;
7159 }
7160
Michael Daltona953be52014-01-16 22:23:28 -08007161#ifdef CONFIG_SYSFS
Tom Herbert36909ea2011-01-09 19:36:31 +00007162 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007163 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00007164 return NULL;
7165 }
7166#endif
7167
David S. Millerfd2ea0a2008-07-17 01:56:23 -07007168 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07007169 if (sizeof_priv) {
7170 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007171 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07007172 alloc_size += sizeof_priv;
7173 }
7174 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007175 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007176
Eric Dumazet74d332c2013-10-30 13:10:44 -07007177 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
7178 if (!p)
7179 p = vzalloc(alloc_size);
Joe Perches62b59422013-02-04 16:48:16 +00007180 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007181 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007182
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00007183 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007184 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007185
Eric Dumazet29b44332010-10-11 10:22:12 +00007186 dev->pcpu_refcnt = alloc_percpu(int);
7187 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07007188 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007189
Linus Torvalds1da177e2005-04-16 15:20:36 -07007190 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00007191 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007192
Jiri Pirko22bedad32010-04-01 21:22:57 +00007193 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007194 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00007195
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09007196 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007197
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07007198 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00007199 dev->gso_max_segs = GSO_MAX_SEGS;
Eric Dumazetfcbeb972014-10-05 10:11:27 -07007200 dev->gso_min_segs = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007201
Herbert Xud565b0a2008-12-15 23:38:52 -08007202 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00007203 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07007204 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00007205 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007206 INIT_LIST_HEAD(&dev->adj_list.upper);
7207 INIT_LIST_HEAD(&dev->adj_list.lower);
7208 INIT_LIST_HEAD(&dev->all_adj_list.upper);
7209 INIT_LIST_HEAD(&dev->all_adj_list.lower);
Salam Noureddine7866a622015-01-27 11:35:48 -08007210 INIT_LIST_HEAD(&dev->ptype_all);
7211 INIT_LIST_HEAD(&dev->ptype_specific);
Eric Dumazet02875872014-10-05 18:38:35 -07007212 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007213 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007214
Phil Sutter906470c2015-08-18 10:30:48 +02007215 if (!dev->tx_queue_len)
Phil Sutterf84bb1e2015-08-27 21:21:36 +02007216 dev->priv_flags |= IFF_NO_QUEUE;
Phil Sutter906470c2015-08-18 10:30:48 +02007217
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007218 dev->num_tx_queues = txqs;
7219 dev->real_num_tx_queues = txqs;
7220 if (netif_alloc_netdev_queues(dev))
7221 goto free_all;
7222
Michael Daltona953be52014-01-16 22:23:28 -08007223#ifdef CONFIG_SYSFS
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007224 dev->num_rx_queues = rxqs;
7225 dev->real_num_rx_queues = rxqs;
7226 if (netif_alloc_rx_queues(dev))
7227 goto free_all;
7228#endif
7229
Linus Torvalds1da177e2005-04-16 15:20:36 -07007230 strcpy(dev->name, name);
Tom Gundersenc835a672014-07-14 16:37:24 +02007231 dev->name_assign_type = name_assign_type;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00007232 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00007233 if (!dev->ethtool_ops)
7234 dev->ethtool_ops = &default_ethtool_ops;
Pablo Neirae687ad62015-05-13 18:19:38 +02007235
7236 nf_hook_ingress_init(dev);
7237
Linus Torvalds1da177e2005-04-16 15:20:36 -07007238 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007239
David S. Miller8d3bdbd2011-02-08 15:02:50 -08007240free_all:
7241 free_netdev(dev);
7242 return NULL;
7243
Eric Dumazet29b44332010-10-11 10:22:12 +00007244free_pcpu:
7245 free_percpu(dev->pcpu_refcnt);
Eric Dumazet74d332c2013-10-30 13:10:44 -07007246free_dev:
7247 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00007248 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007249}
Tom Herbert36909ea2011-01-09 19:36:31 +00007250EXPORT_SYMBOL(alloc_netdev_mqs);
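/* Sketch: allocate a 4-TX/4-RX-queue Ethernet-style device with private
 * data; struct example_mq_priv is a placeholder. netdev_priv() later
 * returns the area reserved by @sizeof_priv.
 */
struct example_mq_priv {
	int id;
};

static struct net_device *example_alloc_mq(void)
{
	return alloc_netdev_mqs(sizeof(struct example_mq_priv), "mq%d",
				NET_NAME_ENUM, ether_setup, 4, 4);
}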
Linus Torvalds1da177e2005-04-16 15:20:36 -07007251
7252/**
7253 * free_netdev - free network device
7254 * @dev: device
7255 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007256 * This function does the last stage of destroying an allocated device
7257 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007258 * If this is the last reference then it will be freed.
Eric Dumazet93d05d42015-11-18 06:31:03 -08007259 * Must be called in process context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007260 */
7261void free_netdev(struct net_device *dev)
7262{
Herbert Xud565b0a2008-12-15 23:38:52 -08007263 struct napi_struct *p, *n;
7264
Eric Dumazet93d05d42015-11-18 06:31:03 -08007265 might_sleep();
Eric Dumazet60877a32013-06-20 01:15:51 -07007266 netif_free_tx_queues(dev);
Michael Daltona953be52014-01-16 22:23:28 -08007267#ifdef CONFIG_SYSFS
Pankaj Gupta10595902015-01-12 11:41:28 +05307268 kvfree(dev->_rx);
Tom Herbertfe822242010-11-09 10:47:38 +00007269#endif
David S. Millere8a04642008-07-17 00:34:19 -07007270
Eric Dumazet33d480c2011-08-11 19:30:52 +00007271 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00007272
Jiri Pirkof001fde2009-05-05 02:48:28 +00007273 /* Flush device addresses */
7274 dev_addr_flush(dev);
7275
Herbert Xud565b0a2008-12-15 23:38:52 -08007276 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
7277 netif_napi_del(p);
7278
Eric Dumazet29b44332010-10-11 10:22:12 +00007279 free_percpu(dev->pcpu_refcnt);
7280 dev->pcpu_refcnt = NULL;
7281
Stephen Hemminger3041a062006-05-26 13:25:24 -07007282 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007283 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07007284 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007285 return;
7286 }
7287
7288 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
7289 dev->reg_state = NETREG_RELEASED;
7290
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07007291 /* will free via device release */
7292 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007293}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007294EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007295
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007296/**
7297 * synchronize_net - Synchronize with packet receive processing
7298 *
7299 * Wait for packets currently being received to be done.
7300 * Does not block later packets from starting.
7301 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09007302void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007303{
7304 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00007305 if (rtnl_is_locked())
7306 synchronize_rcu_expedited();
7307 else
7308 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007309}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07007310EXPORT_SYMBOL(synchronize_net);
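/* Sketch of the usual pattern, with hypothetical example_* types:
 * unpublish an RCU-protected pointer, wait for in-flight receive paths
 * with synchronize_net(), then free the old object.
 */
struct example_handler {
	int id;
};

struct example_ctx {
	struct example_handler __rcu *handler;
};

static void example_unpublish(struct example_ctx *ctx)
{
	struct example_handler *old;

	old = rcu_dereference_protected(ctx->handler, lockdep_rtnl_is_held());
	RCU_INIT_POINTER(ctx->handler, NULL);
	synchronize_net();	/* no packet path can still see @old */
	kfree(old);
}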
Linus Torvalds1da177e2005-04-16 15:20:36 -07007311
7312/**
Eric Dumazet44a08732009-10-27 07:03:04 +00007313 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07007314 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00007315 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08007316 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007317 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08007318 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00007319 * If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007320 *
7321 * Callers must hold the rtnl semaphore. You may want
7322 * unregister_netdev() instead of this.
7323 */
7324
Eric Dumazet44a08732009-10-27 07:03:04 +00007325void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007326{
Herbert Xua6620712007-12-12 19:21:56 -08007327 ASSERT_RTNL();
7328
Eric Dumazet44a08732009-10-27 07:03:04 +00007329 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00007330 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00007331 } else {
7332 rollback_registered(dev);
7333 /* Finish processing unregister after unlock */
7334 net_set_todo(dev);
7335 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007336}
Eric Dumazet44a08732009-10-27 07:03:04 +00007337EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007338
7339/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007340 * unregister_netdevice_many - unregister many devices
7341 * @head: list of devices
Eric Dumazet87757a92014-06-06 06:44:03 -07007342 *
7343 * Note: As most callers use a stack-allocated list_head,
7344 * we force a list_del() to make sure the stack won't be corrupted later.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007345 */
7346void unregister_netdevice_many(struct list_head *head)
7347{
7348 struct net_device *dev;
7349
7350 if (!list_empty(head)) {
7351 rollback_registered_many(head);
7352 list_for_each_entry(dev, head, unreg_list)
7353 net_set_todo(dev);
Eric Dumazet87757a92014-06-06 06:44:03 -07007354 list_del(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007355 }
7356}
Eric Dumazet63c80992009-10-27 07:06:49 +00007357EXPORT_SYMBOL(unregister_netdevice_many);
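/* Sketch: batching pays off because the synchronize_net() phases in
 * rollback_registered_many() are shared by all devices instead of being
 * paid once per device. The array-based iteration is illustrative only.
 */
static void example_batch_unregister(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	ASSERT_RTNL();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);	/* also list_del()s the head */
}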
Eric Dumazet9b5e3832009-10-27 07:04:19 +00007358
7359/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007360 * unregister_netdev - remove device from the kernel
7361 * @dev: device
7362 *
7363 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08007364 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007365 *
7366 * This is just a wrapper for unregister_netdevice that takes
7367 * the rtnl semaphore. In general you want to use this and not
7368 * unregister_netdevice.
7369 */
7370void unregister_netdev(struct net_device *dev)
7371{
7372 rtnl_lock();
7373 unregister_netdevice(dev);
7374 rtnl_unlock();
7375}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007376EXPORT_SYMBOL(unregister_netdev);
7377
Eric W. Biedermance286d32007-09-12 13:53:49 +02007378/**
7379 * dev_change_net_namespace - move device to a different network namespace
7380 * @dev: device
7381 * @net: network namespace
7382 * @pat: If not NULL, the name pattern to try if the current device name
7383 * is already taken in the destination network namespace.
7384 *
7385 * This function shuts down a device interface and moves it
7386 * to a new network namespace. On success 0 is returned, on
7387 * a failure a negative errno code is returned.
7388 *
7389 * Callers must hold the rtnl semaphore.
7390 */
7391
7392int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7393{
Eric W. Biedermance286d32007-09-12 13:53:49 +02007394 int err;
7395
7396 ASSERT_RTNL();
7397
7398 /* Don't allow namespace local devices to be moved. */
7399 err = -EINVAL;
7400 if (dev->features & NETIF_F_NETNS_LOCAL)
7401 goto out;
7402
7403	/* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02007404 if (dev->reg_state != NETREG_REGISTERED)
7405 goto out;
7406
7407	/* Get out if there is nothing to do */
7408 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09007409 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02007410 goto out;
7411
7412 /* Pick the destination device name, and ensure
7413 * we can use it in the destination network namespace.
7414 */
7415 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00007416 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007417 /* We get here if we can't use the current device name */
7418 if (!pat)
7419 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00007420 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007421 goto out;
7422 }
7423
7424 /*
7425 * And now a mini version of register_netdevice unregister_netdevice.
7426 */
7427
7428 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07007429 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007430
7431 /* And unlink it from device chain */
7432 err = -ENODEV;
7433 unlist_netdevice(dev);
7434
7435 synchronize_net();
7436
7437 /* Shutdown queueing discipline. */
7438 dev_shutdown(dev);
7439
7440	/* Notify protocols that we are about to destroy
7441	   this device. They should clean up all their state.
David Lamparter3b27e102010-09-17 03:22:19 +00007442
7443 Note that dev->reg_state stays at NETREG_REGISTERED.
7444	   This is intentional, so that 8021q and macvlan know
7445	   the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02007446 */
7447 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00007448 rcu_barrier();
7449 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007450 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007451
7452 /*
7453 * Flush the unicast and multicast chains
7454 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007455 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00007456 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007457
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007458 /* Send a netdev-removed uevent to the old namespace */
7459 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007460 netdev_adjacent_del_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007461
Eric W. Biedermance286d32007-09-12 13:53:49 +02007462 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09007463 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007464
Eric W. Biedermance286d32007-09-12 13:53:49 +02007465 /* If there is an ifindex conflict assign a new one */
Nicolas Dichtel7a66bbc2015-04-02 17:07:09 +02007466 if (__dev_get_by_index(net, dev->ifindex))
Eric W. Biedermance286d32007-09-12 13:53:49 +02007467 dev->ifindex = dev_new_index(net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007468
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007469 /* Send a netdev-add uevent to the new namespace */
7470 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007471 netdev_adjacent_add_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007472
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007473 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07007474 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007475 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007476
7477 /* Add the device back in the hashes */
7478 list_netdevice(dev);
7479
7480 /* Notify protocols, that a new device appeared. */
7481 call_netdevice_notifiers(NETDEV_REGISTER, dev);
7482
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007483 /*
7484 * Prevent userspace races by waiting until the network
7485 * device is fully set up before sending notifications.
7486 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007487 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007488
Eric W. Biedermance286d32007-09-12 13:53:49 +02007489 synchronize_net();
7490 err = 0;
7491out:
7492 return err;
7493}
Johannes Berg463d0182009-07-14 00:33:35 +02007494EXPORT_SYMBOL_GPL(dev_change_net_namespace);
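/* Sketch: move a device into another namespace under RTNL, falling back
 * to an "eth%d" pattern if its name is already taken there. The caller
 * is assumed to manage the struct net reference; names are illustrative.
 */
static int example_move_to_ns(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();
	return err;
}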
Eric W. Biedermance286d32007-09-12 13:53:49 +02007495
Linus Torvalds1da177e2005-04-16 15:20:36 -07007496static int dev_cpu_callback(struct notifier_block *nfb,
7497 unsigned long action,
7498 void *ocpu)
7499{
7500 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007501 struct sk_buff *skb;
7502 unsigned int cpu, oldcpu = (unsigned long)ocpu;
7503 struct softnet_data *sd, *oldsd;
7504
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007505 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007506 return NOTIFY_OK;
7507
7508 local_irq_disable();
7509 cpu = smp_processor_id();
7510 sd = &per_cpu(softnet_data, cpu);
7511 oldsd = &per_cpu(softnet_data, oldcpu);
7512
7513 /* Find end of our completion_queue. */
7514 list_skb = &sd->completion_queue;
7515 while (*list_skb)
7516 list_skb = &(*list_skb)->next;
7517 /* Append completion queue from offline CPU. */
7518 *list_skb = oldsd->completion_queue;
7519 oldsd->completion_queue = NULL;
7520
Linus Torvalds1da177e2005-04-16 15:20:36 -07007521 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00007522 if (oldsd->output_queue) {
7523 *sd->output_queue_tailp = oldsd->output_queue;
7524 sd->output_queue_tailp = oldsd->output_queue_tailp;
7525 oldsd->output_queue = NULL;
7526 oldsd->output_queue_tailp = &oldsd->output_queue;
7527 }
Eric Dumazetac64da02015-01-15 17:04:22 -08007528	/* Append NAPI poll list from offline CPU, with one exception:
7529	 * process_backlog() must be called by the CPU owning the percpu backlog.
7530 * We properly handle process_queue & input_pkt_queue later.
7531 */
7532 while (!list_empty(&oldsd->poll_list)) {
7533 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7534 struct napi_struct,
7535 poll_list);
7536
7537 list_del_init(&napi->poll_list);
7538 if (napi->poll == process_backlog)
7539 napi->state = 0;
7540 else
7541 ____napi_schedule(sd, napi);
Heiko Carstens264524d2011-06-06 20:50:03 +00007542 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007543
7544 raise_softirq_irqoff(NET_TX_SOFTIRQ);
7545 local_irq_enable();
7546
7547 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00007548 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08007549 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00007550 input_queue_head_incr(oldsd);
7551 }
Eric Dumazetac64da02015-01-15 17:04:22 -08007552 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08007553 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00007554 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07007555 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007556
7557 return NOTIFY_OK;
7558}
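/*
 * Illustrative sketch, not part of this file: the pointer-to-pointer
 * splice idiom used above for completion_queue, in isolation. Walking
 * a struct sk_buff ** to the terminating NULL lets one singly linked
 * list be appended to another without keeping a tail pointer.
 */
static void example_splice_skb_lists(struct sk_buff **dst,
				     struct sk_buff **src)
{
	while (*dst)
		dst = &(*dst)->next;
	*dst = *src;
	*src = NULL;
}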
Linus Torvalds1da177e2005-04-16 15:20:36 -07007559
7560
Herbert Xu7f353bf2007-08-10 15:47:58 -07007561/**
Herbert Xub63365a2008-10-23 01:11:29 -07007562 * netdev_increment_features - increment feature set by one
7563 * @all: current feature set
7564 * @one: new feature set
7565 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07007566 *
7567 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07007568 * @one to the master device with current feature set @all. Will not
7569 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07007570 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007571netdev_features_t netdev_increment_features(netdev_features_t all,
7572 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07007573{
Michał Mirosław1742f182011-04-22 06:31:16 +00007574 if (mask & NETIF_F_GEN_CSUM)
Tom Herberta1882222015-12-14 11:19:43 -08007575 mask |= NETIF_F_CSUM_MASK;
Michał Mirosław1742f182011-04-22 06:31:16 +00007576 mask |= NETIF_F_VLAN_CHALLENGED;
7577
Tom Herberta1882222015-12-14 11:19:43 -08007578 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
Michał Mirosław1742f182011-04-22 06:31:16 +00007579 all &= one | ~NETIF_F_ALL_FOR_ALL;
7580
Michał Mirosław1742f182011-04-22 06:31:16 +00007581 /* If one device supports hw checksumming, set for all. */
7582 if (all & NETIF_F_GEN_CSUM)
Tom Herberta1882222015-12-14 11:19:43 -08007583 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07007584
7585 return all;
7586}
Herbert Xub63365a2008-10-23 01:11:29 -07007587EXPORT_SYMBOL(netdev_increment_features);
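/*
 * Illustrative sketch, not part of this file: how an aggregating driver
 * might fold each slave's features into the master's, in the spirit of
 * bonding's bond_compute_features() (the example_* names are
 * hypothetical; @mask is whatever the master is willing to enable).
 */
struct example_slave {
	struct list_head list;
	struct net_device *dev;
};

static netdev_features_t example_master_features(struct list_head *slaves,
						 netdev_features_t mask)
{
	struct example_slave *s;
	netdev_features_t all = mask;

	list_for_each_entry(s, slaves, list)
		all = netdev_increment_features(all, s->dev->features, mask);
	return all;
}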
Herbert Xu7f353bf2007-08-10 15:47:58 -07007588
Baruch Siach430f03c2013-06-02 20:43:55 +00007589static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007590{
7591 int i;
7592 struct hlist_head *hash;
7593
7594 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7595 if (hash != NULL)
7596 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7597 INIT_HLIST_HEAD(&hash[i]);
7598
7599 return hash;
7600}
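/*
 * Illustrative sketch, not part of this file: scanning one bucket of a
 * table built above. The real lookups (__dev_get_by_name() and friends,
 * earlier in this file) first pick the bucket by hashing the name or
 * ifindex into NETDEV_HASHENTRIES slots.
 */
static struct net_device *example_scan_name_bucket(struct hlist_head *head,
						   const char *name)
{
	struct net_device *dev;

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	return NULL;
}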
7601
Eric W. Biederman881d9662007-09-17 11:56:21 -07007602/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07007603static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007604{
Rustad, Mark D734b6542012-07-18 09:06:07 +00007605 if (net != &init_net)
7606 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07007607
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007608 net->dev_name_head = netdev_create_hash();
7609 if (net->dev_name_head == NULL)
7610 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007611
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007612 net->dev_index_head = netdev_create_hash();
7613 if (net->dev_index_head == NULL)
7614 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007615
7616 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007617
7618err_idx:
7619 kfree(net->dev_name_head);
7620err_name:
7621 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007622}
7623
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007624/**
7625 * netdev_drivername - network driver for the device
7626 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007627 *
7628 * Determine network driver for device.
7629 */
David S. Miller3019de12011-06-06 16:41:33 -07007630const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07007631{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07007632 const struct device_driver *driver;
7633 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07007634 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07007635
7636 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007637 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07007638 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007639
7640 driver = parent->driver;
7641 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07007642 return driver->name;
7643 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007644}
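/*
 * Illustrative sketch, not part of this file: the typical consumer is a
 * diagnostic path blaming a driver by name, e.g. a tx-timeout style
 * message (the example name is hypothetical).
 */
static void example_report_stall(struct net_device *dev)
{
	netdev_err(dev, "transmit stall suspected (driver %s)\n",
		   netdev_drivername(dev));
}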
7645
Joe Perches6ea754e2014-09-22 11:10:50 -07007646static void __netdev_printk(const char *level, const struct net_device *dev,
7647 struct va_format *vaf)
Joe Perches256df2f2010-06-27 01:02:35 +00007648{
Joe Perchesb004ff42012-09-12 20:12:19 -07007649 if (dev && dev->dev.parent) {
Joe Perches6ea754e2014-09-22 11:10:50 -07007650 dev_printk_emit(level[1] - '0',
7651 dev->dev.parent,
7652 "%s %s %s%s: %pV",
7653 dev_driver_string(dev->dev.parent),
7654 dev_name(dev->dev.parent),
7655 netdev_name(dev), netdev_reg_state(dev),
7656 vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007657 } else if (dev) {
Joe Perches6ea754e2014-09-22 11:10:50 -07007658 printk("%s%s%s: %pV",
7659 level, netdev_name(dev), netdev_reg_state(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007660 } else {
Joe Perches6ea754e2014-09-22 11:10:50 -07007661 printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007662 }
Joe Perches256df2f2010-06-27 01:02:35 +00007663}
7664
Joe Perches6ea754e2014-09-22 11:10:50 -07007665void netdev_printk(const char *level, const struct net_device *dev,
7666 const char *format, ...)
Joe Perches256df2f2010-06-27 01:02:35 +00007667{
7668 struct va_format vaf;
7669 va_list args;
Joe Perches256df2f2010-06-27 01:02:35 +00007670
7671 va_start(args, format);
7672
7673 vaf.fmt = format;
7674 vaf.va = &args;
7675
Joe Perches6ea754e2014-09-22 11:10:50 -07007676 __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007677
Joe Perches256df2f2010-06-27 01:02:35 +00007678 va_end(args);
Joe Perches256df2f2010-06-27 01:02:35 +00007679}
7680EXPORT_SYMBOL(netdev_printk);
7681
7682#define define_netdev_printk_level(func, level) \
Joe Perches6ea754e2014-09-22 11:10:50 -07007683void func(const struct net_device *dev, const char *fmt, ...) \
Joe Perches256df2f2010-06-27 01:02:35 +00007684{ \
Joe Perches256df2f2010-06-27 01:02:35 +00007685 struct va_format vaf; \
7686 va_list args; \
7687 \
7688 va_start(args, fmt); \
7689 \
7690 vaf.fmt = fmt; \
7691 vaf.va = &args; \
7692 \
Joe Perches6ea754e2014-09-22 11:10:50 -07007693 __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07007694 \
Joe Perches256df2f2010-06-27 01:02:35 +00007695 va_end(args); \
Joe Perches256df2f2010-06-27 01:02:35 +00007696} \
7697EXPORT_SYMBOL(func);
7698
7699define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7700define_netdev_printk_level(netdev_alert, KERN_ALERT);
7701define_netdev_printk_level(netdev_crit, KERN_CRIT);
7702define_netdev_printk_level(netdev_err, KERN_ERR);
7703define_netdev_printk_level(netdev_warn, KERN_WARNING);
7704define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7705define_netdev_printk_level(netdev_info, KERN_INFO);
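/*
 * Illustrative sketch, not part of this file: driver-side use of the
 * helpers generated above (the example name is hypothetical). When
 * dev->dev.parent is set, the output is prefixed with driver, bus and
 * interface names via dev_printk_emit().
 */
static void example_link_report(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link is up\n");
	else
		netdev_warn(dev, "link is down\n");
}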
7706
Pavel Emelyanov46650792007-10-08 20:38:39 -07007707static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007708{
7709 kfree(net->dev_name_head);
7710 kfree(net->dev_index_head);
7711}
7712
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007713static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07007714 .init = netdev_init,
7715 .exit = netdev_exit,
7716};
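/*
 * Illustrative sketch, not part of this file: a minimal pernet subsystem
 * of one's own (the example_* names are hypothetical). With .id and
 * .size set, the core allocates per-namespace state that init/exit and
 * other code can retrieve with net_generic() (<net/netns/generic.h>).
 */
struct example_pernet {
	int counter;
};

static int example_net_id __read_mostly;

static int __net_init example_init(struct net *net)
{
	struct example_pernet *p = net_generic(net, example_net_id);

	p->counter = 0;
	return 0;
}

static struct pernet_operations example_net_ops = {
	.init = example_init,
	.id   = &example_net_id,
	.size = sizeof(struct example_pernet),
};

/* From module/boot init: err = register_pernet_subsys(&example_net_ops); */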
7717
Pavel Emelyanov46650792007-10-08 20:38:39 -07007718static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007719{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007720 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02007721 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007722 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02007723 * initial network namespace
7724 */
7725 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007726 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007727 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007728 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02007729
7730 /* Ignore unmoveable devices (i.e. loopback) */
7731 if (dev->features & NETIF_F_NETNS_LOCAL)
7732 continue;
7733
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007734 /* Leave virtual devices for the generic cleanup */
7735 if (dev->rtnl_link_ops)
7736 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08007737
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007738 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007739 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7740 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007741 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007742 pr_emerg("%s: failed to move %s to init_net: %d\n",
7743 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007744 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02007745 }
7746 }
7747 rtnl_unlock();
7748}
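/*
 * Illustrative sketch, not part of this file: a driver whose devices
 * must never leave their namespace opts out the way loopback does, by
 * setting NETIF_F_NETNS_LOCAL at setup time (the example name is
 * hypothetical).
 */
static void example_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->features |= NETIF_F_NETNS_LOCAL;
}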
7749
Eric W. Biederman50624c92013-09-23 21:19:49 -07007750static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7751{
7752 /* Return with the rtnl_lock held when there are no network
7753 * devices unregistering in any network namespace in net_list.
7754 */
7755 struct net *net;
7756 bool unregistering;
Peter Zijlstraff960a72014-10-29 17:04:56 +01007757 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007758
Peter Zijlstraff960a72014-10-29 17:04:56 +01007759 add_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007760 for (;;) {
Eric W. Biederman50624c92013-09-23 21:19:49 -07007761 unregistering = false;
7762 rtnl_lock();
7763 list_for_each_entry(net, net_list, exit_list) {
7764 if (net->dev_unreg_count > 0) {
7765 unregistering = true;
7766 break;
7767 }
7768 }
7769 if (!unregistering)
7770 break;
7771 __rtnl_unlock();
Peter Zijlstraff960a72014-10-29 17:04:56 +01007772
7773 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007774 }
Peter Zijlstraff960a72014-10-29 17:04:56 +01007775 remove_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007776}
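/*
 * Illustrative sketch, not part of this file: the wait_woken() pattern
 * used above, in generic form (the example names are hypothetical).
 * Unlike an open-coded schedule() loop, wait_woken() will not lose a
 * wakeup that races with the condition check, so no extra barriers are
 * needed in the loop.
 */
static void example_wait_for(wait_queue_head_t *wq, bool (*done)(void))
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(wq, &wait);
	while (!done())
		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	remove_wait_queue(wq, &wait);
}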
7777
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007778static void __net_exit default_device_exit_batch(struct list_head *net_list)
7779{
7780 /* At exit all network devices most be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04007781 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007782 * Do this across as many network namespaces as possible to
7783 * improve batching efficiency.
7784 */
7785 struct net_device *dev;
7786 struct net *net;
7787 LIST_HEAD(dev_kill_list);
7788
Eric W. Biederman50624c92013-09-23 21:19:49 -07007789 /* To prevent network device cleanup code from dereferencing
7790	 * loopback devices or network devices that have been freed,
7791	 * wait here for all pending unregistrations to complete
7792	 * before unregistering the loopback device and allowing the
7793	 * network namespace to be freed.
7794 *
7795 * The netdev todo list containing all network devices
7796 * unregistrations that happen in default_device_exit_batch
7797 * will run in the rtnl_unlock() at the end of
7798 * default_device_exit_batch.
7799 */
7800 rtnl_lock_unregistering(net_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007801 list_for_each_entry(net, net_list, exit_list) {
7802 for_each_netdev_reverse(net, dev) {
Jiri Pirkob0ab2fa2014-06-26 09:58:25 +02007803 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007804 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7805 else
7806 unregister_netdevice_queue(dev, &dev_kill_list);
7807 }
7808 }
7809 unregister_netdevice_many(&dev_kill_list);
7810 rtnl_unlock();
7811}
7812
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007813static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007814 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007815 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02007816};
7817
Linus Torvalds1da177e2005-04-16 15:20:36 -07007818/*
7819 * Initialize the DEV module. At boot time this walks the device list and
7820 * unhooks any devices that fail to initialise (normally hardware not
7821 * present) and leaves us with a valid list of present and active devices.
7822 *
7823 */
7824
7825/*
7826 * This is called single threaded during boot, so no need
7827 * to take the rtnl semaphore.
7828 */
7829static int __init net_dev_init(void)
7830{
7831 int i, rc = -ENOMEM;
7832
7833 BUG_ON(!dev_boot_phase);
7834
Linus Torvalds1da177e2005-04-16 15:20:36 -07007835 if (dev_proc_init())
7836 goto out;
7837
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007838 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07007839 goto out;
7840
7841 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08007842 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007843 INIT_LIST_HEAD(&ptype_base[i]);
7844
Vlad Yasevich62532da2012-11-15 08:49:10 +00007845 INIT_LIST_HEAD(&offload_base);
7846
Eric W. Biederman881d9662007-09-17 11:56:21 -07007847 if (register_pernet_subsys(&netdev_net_ops))
7848 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007849
7850 /*
7851 * Initialise the packet receive queues.
7852 */
7853
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07007854 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007855 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007856
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007857 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07007858 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007859 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00007860 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00007861#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007862 sd->csd.func = rps_trigger_softirq;
7863 sd->csd.info = sd;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007864 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07007865#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00007866
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007867 sd->backlog.poll = process_backlog;
7868 sd->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007869 }
7870
Linus Torvalds1da177e2005-04-16 15:20:36 -07007871 dev_boot_phase = 0;
7872
Eric W. Biederman505d4f72008-11-07 22:54:20 -08007873	/* The loopback device is special: if any other network device
7874	 * is present in a network namespace, the loopback device must
7875	 * be present. Since we now dynamically allocate and free the
7876	 * loopback device, ensure this invariant is maintained by
7877	 * keeping the loopback device as the first device on the
7878	 * list of network devices, ensuring that the loopback device
7879	 * is the first device that appears and the last network device
7880	 * that disappears.
7881 */
7882 if (register_pernet_device(&loopback_net_ops))
7883 goto out;
7884
7885 if (register_pernet_device(&default_device_ops))
7886 goto out;
7887
Carlos R. Mafra962cf362008-05-15 11:15:37 -03007888 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7889 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007890
7891 hotcpu_notifier(dev_cpu_callback, 0);
Thomas Graff38a9eb2015-07-21 10:43:56 +02007892 dst_subsys_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007893 rc = 0;
7894out:
7895 return rc;
7896}
7897
7898subsys_initcall(net_dev_init);