/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

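/* Usage sketch (illustrative only, not part of the original file): a module
 * that wants to tap every incoming frame could register a handler roughly
 * like the following.  The handler name and the ETH_P_ALL choice are
 * assumptions made for this example; see af_packet.c for a real in-tree user.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		// consume the skb handed to us
 *		return 0;
 *	}
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_ALL),
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);	// e.g. in module init (does not sleep)
 *	dev_remove_pack(&my_ptype);	// e.g. in module exit (may sleep)
 */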

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

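/* Example (illustrative, values made up): with the parser above, a kernel
 * command line such as
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * is split by get_options() into irq=9, base_addr=0x300, mem_start=0 and
 * mem_end=0, and the trailing string "eth0" is passed as the device name to
 * netdev_boot_setup_add().
 */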
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	/* If dev->rtnl_link_ops is set, it's a virtual interface. */
	if (dev->rtnl_link_ops)
		return 0;

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

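/* Usage sketch (illustrative): the refcounted and the RCU lookups above are
 * typically used as below; "eth0" and the surrounding code are assumptions
 * made for the example only.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);		// drop the reference we were given
 *	}
 *
 * or, when no reference needs to outlive the critical section:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(&init_net, "eth0");
 *	if (dev)
 *		... use dev, but do not sleep or keep it past the unlock ...
 *	rcu_read_unlock();
 */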
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

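/* Usage sketch (illustrative): a MAC lookup must run under RCU (or RTNL),
 * e.g.
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		... dev is only guaranteed valid until rcu_read_unlock() ...
 *	rcu_read_unlock();
 *
 * "mac" here stands for a caller-provided buffer of dev->addr_len bytes;
 * it is an assumed name used only for this example.
 */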
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

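/* Usage sketch (illustrative): a caller that wants an automatically numbered
 * name passes a format string containing a single "%d", e.g.
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto fail;	// -EINVAL, -ENOMEM or -ENFILE as described above
 *
 * On success dev->name holds e.g. "dummy0" and err is the unit number that
 * was chosen.  "dummy%d" and the fail label are assumptions for the example.
 */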
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

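/* Usage sketch (illustrative): dev_change_name() must be called under RTNL
 * and the device must be down, e.g.
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "mynet%d");	// "%d" requests auto-numbering
 *	rtnl_unlock();
 *
 * The name "mynet%d" is made up for this example.
 */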
1194/**
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001195 * dev_set_alias - change ifalias of a device
1196 * @dev: device
1197 * @alias: name up to IFALIASZ
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07001198 * @len: limit of bytes to copy from info
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001199 *
1200 * Set ifalias for a device,
1201 */
1202int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1203{
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001204 char *new_ifalias;
1205
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001206 ASSERT_RTNL();
1207
1208 if (len >= IFALIASZ)
1209 return -EINVAL;
1210
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001211 if (!len) {
Sachin Kamat388dfc22012-11-20 00:57:04 +00001212 kfree(dev->ifalias);
1213 dev->ifalias = NULL;
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001214 return 0;
1215 }
1216
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001217 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1218 if (!new_ifalias)
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001219 return -ENOMEM;
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001220 dev->ifalias = new_ifalias;
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001221
1222 strlcpy(dev->ifalias, alias, len+1);
1223 return len;
1224}
1225
1226
1227/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001228 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001229 * @dev: device to cause notification
1230 *
1231 * Called to indicate a device has changed features.
1232 */
1233void netdev_features_change(struct net_device *dev)
1234{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001235 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001236}
1237EXPORT_SYMBOL(netdev_features_change);
1238
1239/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240 * netdev_state_change - device changes state
1241 * @dev: device to cause notification
1242 *
1243 * Called to indicate a device has changed state. This function calls
1244 * the notifier chains for netdev_chain and sends a NEWLINK message
1245 * to the routing socket.
1246 */
1247void netdev_state_change(struct net_device *dev)
1248{
1249 if (dev->flags & IFF_UP) {
Loic Prylli5495119462014-07-01 21:39:43 -07001250 struct netdev_notifier_change_info change_info;
1251
1252 change_info.flags_changed = 0;
1253 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1254 &change_info.info);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001255 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 }
1257}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001258EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259
Amerigo Wangee89bab2012-08-09 22:14:56 +00001260/**
1261 * netdev_notify_peers - notify network peers about existence of @dev
1262 * @dev: network device
1263 *
1264 * Generate traffic such that interested network peers are aware of
1265 * @dev, such as by generating a gratuitous ARP. This may be used when
1266 * a device wants to inform the rest of the network about some sort of
1267 * reconfiguration such as a failover event or virtual machine
1268 * migration.
1269 */
1270void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001271{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001272 rtnl_lock();
1273 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1274 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001275}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001276EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001277
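/* Illustrative sketch (the device pointer is a placeholder): a bonding or
 * virtualization driver that just completed a failover could announce the
 * new location of its addresses with
 *
 *	netdev_notify_peers(dev);
 *
 * The function takes the RTNL lock itself, so it must be called without
 * RTNL held.
 */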
Patrick McHardybd380812010-02-26 06:34:53 +00001278static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001280 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001281 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001283 ASSERT_RTNL();
1284
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 if (!netif_device_present(dev))
1286 return -ENODEV;
1287
Neil Hormanca99ca12013-02-05 08:05:43 +00001288 /* Block netpoll from trying to do any rx path servicing.
1289	 * If we don't do this, there is a chance ndo_poll_controller
1290	 * or ndo_poll may be running while we open the device.
1291 */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001292 netpoll_poll_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001293
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001294 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1295 ret = notifier_to_errno(ret);
1296 if (ret)
1297 return ret;
1298
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001300
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001301 if (ops->ndo_validate_addr)
1302 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001303
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001304 if (!ret && ops->ndo_open)
1305 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306
Eric W. Biederman66b55522014-03-27 15:39:03 -07001307 netpoll_poll_enable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001308
Jeff Garzikbada3392007-10-23 20:19:37 -07001309 if (ret)
1310 clear_bit(__LINK_STATE_START, &dev->state);
1311 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 dev->flags |= IFF_UP;
Patrick McHardy4417da62007-06-27 01:28:10 -07001313 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001315 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001317
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 return ret;
1319}
Patrick McHardybd380812010-02-26 06:34:53 +00001320
1321/**
1322 * dev_open - prepare an interface for use.
1323 * @dev: device to open
1324 *
1325 * Takes a device from down to up state. The device's private open
1326 * function is invoked and then the multicast lists are loaded. Finally
1327 * the device is moved into the up state and a %NETDEV_UP message is
1328 * sent to the netdev notifier chain.
1329 *
1330 * Calling this function on an active interface is a nop. On a failure
1331 * a negative errno code is returned.
1332 */
1333int dev_open(struct net_device *dev)
1334{
1335 int ret;
1336
Patrick McHardybd380812010-02-26 06:34:53 +00001337 if (dev->flags & IFF_UP)
1338 return 0;
1339
Patrick McHardybd380812010-02-26 06:34:53 +00001340 ret = __dev_open(dev);
1341 if (ret < 0)
1342 return ret;
1343
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001344 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Patrick McHardybd380812010-02-26 06:34:53 +00001345 call_netdevice_notifiers(NETDEV_UP, dev);
1346
1347 return ret;
1348}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001349EXPORT_SYMBOL(dev_open);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350
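/* Illustrative sketch (dev is a placeholder obtained elsewhere, e.g. via
 * dev_get_by_name()): bringing an interface up from kernel code mirrors
 * the ioctl path and must run under RTNL:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */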
Octavian Purdila44345722010-12-13 12:44:07 +00001351static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352{
Octavian Purdila44345722010-12-13 12:44:07 +00001353 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001354
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001355 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001356 might_sleep();
1357
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001358 list_for_each_entry(dev, head, close_list) {
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001359 /* Temporarily disable netpoll until the interface is down */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001360 netpoll_poll_disable(dev);
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001361
Octavian Purdila44345722010-12-13 12:44:07 +00001362 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363
Octavian Purdila44345722010-12-13 12:44:07 +00001364 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365
Octavian Purdila44345722010-12-13 12:44:07 +00001366		/* Synchronize to scheduled poll. We cannot touch the poll list; it
1367		 * can even be on a different CPU. So just clear netif_running().
1368 *
1369 * dev->stop() will invoke napi_disable() on all of it's
1370		 * dev->stop() will invoke napi_disable() on all of its
1371 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001372 smp_mb__after_atomic(); /* Commit netif_running(). */
Octavian Purdila44345722010-12-13 12:44:07 +00001373 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374
Octavian Purdila44345722010-12-13 12:44:07 +00001375 dev_deactivate_many(head);
1376
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001377 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001378 const struct net_device_ops *ops = dev->netdev_ops;
1379
1380 /*
1381		 * Call the device-specific close. This cannot fail and is
1382		 * only done if the device is UP.
1383 *
1384 * We allow it to be called even after a DETACH hot-plug
1385 * event.
1386 */
1387 if (ops->ndo_stop)
1388 ops->ndo_stop(dev);
1389
Octavian Purdila44345722010-12-13 12:44:07 +00001390 dev->flags &= ~IFF_UP;
Eric W. Biederman66b55522014-03-27 15:39:03 -07001391 netpoll_poll_enable(dev);
Octavian Purdila44345722010-12-13 12:44:07 +00001392 }
1393
1394 return 0;
1395}
1396
1397static int __dev_close(struct net_device *dev)
1398{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001399 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001400 LIST_HEAD(single);
1401
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001402 list_add(&dev->close_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001403 retval = __dev_close_many(&single);
1404 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001405
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001406 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001407}
1408
David S. Miller99c4a262015-03-18 22:52:33 -04001409int dev_close_many(struct list_head *head, bool unlink)
Octavian Purdila44345722010-12-13 12:44:07 +00001410{
1411 struct net_device *dev, *tmp;
Octavian Purdila44345722010-12-13 12:44:07 +00001412
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001413 /* Remove the devices that don't need to be closed */
1414 list_for_each_entry_safe(dev, tmp, head, close_list)
Octavian Purdila44345722010-12-13 12:44:07 +00001415 if (!(dev->flags & IFF_UP))
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001416 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001417
1418 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001419
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001420 list_for_each_entry_safe(dev, tmp, head, close_list) {
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001421 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Octavian Purdila44345722010-12-13 12:44:07 +00001422 call_netdevice_notifiers(NETDEV_DOWN, dev);
David S. Miller99c4a262015-03-18 22:52:33 -04001423 if (unlink)
1424 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001425 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427 return 0;
1428}
David S. Miller99c4a262015-03-18 22:52:33 -04001429EXPORT_SYMBOL(dev_close_many);
Patrick McHardybd380812010-02-26 06:34:53 +00001430
1431/**
1432 * dev_close - shutdown an interface.
1433 * @dev: device to shutdown
1434 *
1435 * This function moves an active device into down state. A
1436 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1437 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1438 * chain.
1439 */
1440int dev_close(struct net_device *dev)
1441{
Eric Dumazete14a5992011-05-10 12:26:06 -07001442 if (dev->flags & IFF_UP) {
1443 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001444
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001445 list_add(&dev->close_list, &single);
David S. Miller99c4a262015-03-18 22:52:33 -04001446 dev_close_many(&single, true);
Eric Dumazete14a5992011-05-10 12:26:06 -07001447 list_del(&single);
1448 }
dingtianhongda6e3782013-05-27 19:53:31 +00001449 return 0;
Patrick McHardybd380812010-02-26 06:34:53 +00001450}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001451EXPORT_SYMBOL(dev_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452
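/* Illustrative sketch (dev is a placeholder): the matching shutdown path,
 * also under RTNL; calling it on an already-down device is a no-op:
 *
 *	rtnl_lock();
 *	dev_close(dev);
 *	rtnl_unlock();
 */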
1453
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001454/**
1455 * dev_disable_lro - disable Large Receive Offload on a device
1456 * @dev: device
1457 *
1458 * Disable Large Receive Offload (LRO) on a net device. Must be
1459 * called under RTNL. This is needed if received packets may be
1460 * forwarded to another interface.
1461 */
1462void dev_disable_lro(struct net_device *dev)
1463{
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001464 struct net_device *lower_dev;
1465 struct list_head *iter;
Michal Kubeček529d0482013-11-15 06:18:50 +01001466
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001467 dev->wanted_features &= ~NETIF_F_LRO;
1468 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001469
Michał Mirosław22d59692011-04-21 12:42:15 +00001470 if (unlikely(dev->features & NETIF_F_LRO))
1471 netdev_WARN(dev, "failed to disable LRO!\n");
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001472
1473 netdev_for_each_lower_dev(dev, lower_dev, iter)
1474 dev_disable_lro(lower_dev);
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001475}
1476EXPORT_SYMBOL(dev_disable_lro);
1477
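/* Illustrative sketch (dev is a placeholder): a path that just enabled
 * forwarding on an interface would typically follow up, under RTNL, with
 *
 *	dev_disable_lro(dev);
 *
 * so that aggregated frames are never forwarded to another interface.
 */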
Jiri Pirko351638e2013-05-28 01:30:21 +00001478static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1479 struct net_device *dev)
1480{
1481 struct netdev_notifier_info info;
1482
1483 netdev_notifier_info_init(&info, dev);
1484 return nb->notifier_call(nb, val, &info);
1485}
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001486
Eric W. Biederman881d9662007-09-17 11:56:21 -07001487static int dev_boot_phase = 1;
1488
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489/**
1490 * register_netdevice_notifier - register a network notifier block
1491 * @nb: notifier
1492 *
1493 * Register a notifier to be called when network device events occur.
1494 * The notifier passed is linked into the kernel structures and must
1495 * not be reused until it has been unregistered. A negative errno code
1496 * is returned on a failure.
1497 *
1498 * When registered, all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001499 * to the new notifier to allow the device to have a race-free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 * view of the network device list.
1501 */
1502
1503int register_netdevice_notifier(struct notifier_block *nb)
1504{
1505 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001506 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001507 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 int err;
1509
1510 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001511 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001512 if (err)
1513 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001514 if (dev_boot_phase)
1515 goto unlock;
1516 for_each_net(net) {
1517 for_each_netdev(net, dev) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001518 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001519 err = notifier_to_errno(err);
1520 if (err)
1521 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
Eric W. Biederman881d9662007-09-17 11:56:21 -07001523 if (!(dev->flags & IFF_UP))
1524 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001525
Jiri Pirko351638e2013-05-28 01:30:21 +00001526 call_netdevice_notifier(nb, NETDEV_UP, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001527 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001529
1530unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 rtnl_unlock();
1532 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001533
1534rollback:
1535 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001536 for_each_net(net) {
1537 for_each_netdev(net, dev) {
1538 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001539 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001540
Eric W. Biederman881d9662007-09-17 11:56:21 -07001541 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001542 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1543 dev);
1544 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001545 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001546 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001547 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001548 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001549
RongQing.Li8f891482011-11-30 23:43:07 -05001550outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001551 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001552 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001554EXPORT_SYMBOL(register_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555
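/* Illustrative sketch (function and variable names are placeholders): a
 * subsystem tracking interface events usually registers a notifier like
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			pr_debug("%s is up\n", dev->name);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);
 *
 * Existing devices are replayed as NETDEV_REGISTER/NETDEV_UP events, so
 * the handler above sees them as well.
 */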
1556/**
1557 * unregister_netdevice_notifier - unregister a network notifier block
1558 * @nb: notifier
1559 *
1560 * Unregister a notifier previously registered by
1561 * register_netdevice_notifier(). The notifier is unlinked from the
1562 * kernel structures and may then be reused. A negative errno code
1563 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001564 *
1565 * After unregistering, unregister and down device events are synthesized
1566 * for all devices on the device list to the removed notifier to remove
1567 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 */
1569
1570int unregister_netdevice_notifier(struct notifier_block *nb)
1571{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001572 struct net_device *dev;
1573 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001574 int err;
1575
1576 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001577 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001578 if (err)
1579 goto unlock;
1580
1581 for_each_net(net) {
1582 for_each_netdev(net, dev) {
1583 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001584 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1585 dev);
1586 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001587 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001588 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001589 }
1590 }
1591unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001592 rtnl_unlock();
1593 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001595EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596
1597/**
Jiri Pirko351638e2013-05-28 01:30:21 +00001598 * call_netdevice_notifiers_info - call all network notifier blocks
1599 * @val: value passed unmodified to notifier function
1600 * @dev: net_device pointer passed unmodified to notifier function
1601 * @info: notifier information data
1602 *
1603 * Call all network notifier blocks. Parameters and return value
1604 * are as for raw_notifier_call_chain().
1605 */
1606
stephen hemminger1d143d92013-12-29 14:01:29 -08001607static int call_netdevice_notifiers_info(unsigned long val,
1608 struct net_device *dev,
1609 struct netdev_notifier_info *info)
Jiri Pirko351638e2013-05-28 01:30:21 +00001610{
1611 ASSERT_RTNL();
1612 netdev_notifier_info_init(info, dev);
1613 return raw_notifier_call_chain(&netdev_chain, val, info);
1614}
Jiri Pirko351638e2013-05-28 01:30:21 +00001615
1616/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 * call_netdevice_notifiers - call all network notifier blocks
1618 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001619 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 *
1621 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001622 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 */
1624
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001625int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626{
Jiri Pirko351638e2013-05-28 01:30:21 +00001627 struct netdev_notifier_info info;
1628
1629 return call_netdevice_notifiers_info(val, dev, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001631EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632
Daniel Borkmann45771392015-04-10 23:07:54 +02001633#ifdef CONFIG_NET_CLS_ACT
1634static struct static_key ingress_needed __read_mostly;
1635
1636void net_inc_ingress_queue(void)
1637{
1638 static_key_slow_inc(&ingress_needed);
1639}
1640EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1641
1642void net_dec_ingress_queue(void)
1643{
1644 static_key_slow_dec(&ingress_needed);
1645}
1646EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1647#endif
1648
Ingo Molnarc5905af2012-02-24 08:31:31 +01001649static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001650#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001651/* We are not allowed to call static_key_slow_dec() from irq context
Eric Dumazetb90e5792011-11-28 11:16:50 +00001652 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001653 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001654 */
1655static atomic_t netstamp_needed_deferred;
1656#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657
1658void net_enable_timestamp(void)
1659{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001660#ifdef HAVE_JUMP_LABEL
1661 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1662
1663 if (deferred) {
1664 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001665 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001666 return;
1667 }
1668#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001669 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001671EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672
1673void net_disable_timestamp(void)
1674{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001675#ifdef HAVE_JUMP_LABEL
1676 if (in_interrupt()) {
1677 atomic_inc(&netstamp_needed_deferred);
1678 return;
1679 }
1680#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001681 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001683EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684
Eric Dumazet3b098e22010-05-15 23:57:10 -07001685static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686{
Eric Dumazet588f0332011-11-15 04:12:55 +00001687 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001688 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001689 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690}
1691
Eric Dumazet588f0332011-11-15 04:12:55 +00001692#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001693 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001694 if ((COND) && !(SKB)->tstamp.tv64) \
1695 __net_timestamp(SKB); \
1696 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001697
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04001698bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001699{
1700 unsigned int len;
1701
1702 if (!(dev->flags & IFF_UP))
1703 return false;
1704
1705 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1706 if (skb->len <= len)
1707 return true;
1708
1709	/* If TSO is enabled, we don't care about the length, as the packet
1710	 * could be forwarded without being segmented beforehand.
1711 */
1712 if (skb_is_gso(skb))
1713 return true;
1714
1715 return false;
1716}
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04001717EXPORT_SYMBOL_GPL(is_skb_forwardable);
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001718
Herbert Xua0265d22014-04-17 13:45:03 +08001719int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1720{
Willem de Bruijnbbbf2df2015-06-08 11:53:08 -04001721 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
1722 unlikely(!is_skb_forwardable(dev, skb))) {
Herbert Xua0265d22014-04-17 13:45:03 +08001723 atomic_long_inc(&dev->rx_dropped);
1724 kfree_skb(skb);
1725 return NET_RX_DROP;
1726 }
1727
1728 skb_scrub_packet(skb, true);
WANG Cong08b4b8e2015-03-20 14:29:09 -07001729 skb->priority = 0;
Herbert Xua0265d22014-04-17 13:45:03 +08001730 skb->protocol = eth_type_trans(skb, dev);
Jay Vosburgh2c26d342014-12-19 15:32:00 -08001731 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
Herbert Xua0265d22014-04-17 13:45:03 +08001732
1733 return 0;
1734}
1735EXPORT_SYMBOL_GPL(__dev_forward_skb);
1736
Arnd Bergmann44540962009-11-26 06:07:08 +00001737/**
1738 * dev_forward_skb - loopback an skb to another netif
1739 *
1740 * @dev: destination network device
1741 * @skb: buffer to forward
1742 *
1743 * return values:
1744 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001745 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001746 *
1747 * dev_forward_skb can be used for injecting an skb from the
1748 * start_xmit function of one device into the receive queue
1749 * of another device.
1750 *
1751 * The receiving device may be in another namespace, so
1752 * we have to clear all information in the skb that could
1753 * impact namespace isolation.
1754 */
1755int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1756{
Herbert Xua0265d22014-04-17 13:45:03 +08001757 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001758}
1759EXPORT_SYMBOL_GPL(dev_forward_skb);
1760
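/* Illustrative sketch (peer and stats are placeholders): a virtual
 * driver's ndo_start_xmit() could hand frames to its paired device with
 *
 *	if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
 *		stats->tx_packets++;
 *
 * which is similar in spirit to what pair devices such as veth do.
 */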
Changli Gao71d9dec2010-12-15 19:57:25 +00001761static inline int deliver_skb(struct sk_buff *skb,
1762 struct packet_type *pt_prev,
1763 struct net_device *orig_dev)
1764{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001765 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1766 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001767 atomic_inc(&skb->users);
1768 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1769}
1770
Salam Noureddine7866a622015-01-27 11:35:48 -08001771static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1772 struct packet_type **pt,
Jiri Pirkofbcb2172015-03-30 16:56:01 +02001773 struct net_device *orig_dev,
1774 __be16 type,
Salam Noureddine7866a622015-01-27 11:35:48 -08001775 struct list_head *ptype_list)
1776{
1777 struct packet_type *ptype, *pt_prev = *pt;
1778
1779 list_for_each_entry_rcu(ptype, ptype_list, list) {
1780 if (ptype->type != type)
1781 continue;
1782 if (pt_prev)
Jiri Pirkofbcb2172015-03-30 16:56:01 +02001783 deliver_skb(skb, pt_prev, orig_dev);
Salam Noureddine7866a622015-01-27 11:35:48 -08001784 pt_prev = ptype;
1785 }
1786 *pt = pt_prev;
1787}
1788
Eric Leblondc0de08d2012-08-16 22:02:58 +00001789static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1790{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001791 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001792 return false;
1793
1794 if (ptype->id_match)
1795 return ptype->id_match(ptype, skb->sk);
1796 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1797 return true;
1798
1799 return false;
1800}
1801
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802/*
1803 * Support routine. Sends outgoing frames to any network
1804 * taps currently in use.
1805 */
1806
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001807static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808{
1809 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001810 struct sk_buff *skb2 = NULL;
1811 struct packet_type *pt_prev = NULL;
Salam Noureddine7866a622015-01-27 11:35:48 -08001812 struct list_head *ptype_list = &ptype_all;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001813
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 rcu_read_lock();
Salam Noureddine7866a622015-01-27 11:35:48 -08001815again:
1816 list_for_each_entry_rcu(ptype, ptype_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 /* Never send packets back to the socket
1818 * they originated from - MvS (miquels@drinkel.ow.org)
1819 */
Salam Noureddine7866a622015-01-27 11:35:48 -08001820 if (skb_loop_sk(ptype, skb))
1821 continue;
Changli Gao71d9dec2010-12-15 19:57:25 +00001822
Salam Noureddine7866a622015-01-27 11:35:48 -08001823 if (pt_prev) {
1824 deliver_skb(skb2, pt_prev, skb->dev);
Changli Gao71d9dec2010-12-15 19:57:25 +00001825 pt_prev = ptype;
Salam Noureddine7866a622015-01-27 11:35:48 -08001826 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 }
Salam Noureddine7866a622015-01-27 11:35:48 -08001828
1829 /* need to clone skb, done only once */
1830 skb2 = skb_clone(skb, GFP_ATOMIC);
1831 if (!skb2)
1832 goto out_unlock;
1833
1834 net_timestamp_set(skb2);
1835
1836		/* The network header should already be
1837		 * set by the sender; the reset below is
1838		 * just protection against buggy protocols.
1839 */
1840 skb_reset_mac_header(skb2);
1841
1842 if (skb_network_header(skb2) < skb2->data ||
1843 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1844 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1845 ntohs(skb2->protocol),
1846 dev->name);
1847 skb_reset_network_header(skb2);
1848 }
1849
1850 skb2->transport_header = skb2->network_header;
1851 skb2->pkt_type = PACKET_OUTGOING;
1852 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 }
Salam Noureddine7866a622015-01-27 11:35:48 -08001854
1855 if (ptype_list == &ptype_all) {
1856 ptype_list = &dev->ptype_all;
1857 goto again;
1858 }
1859out_unlock:
Changli Gao71d9dec2010-12-15 19:57:25 +00001860 if (pt_prev)
1861 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 rcu_read_unlock();
1863}
1864
Ben Hutchings2c530402012-07-10 10:55:09 +00001865/**
1866 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001867 * @dev: Network device
1868 * @txq: number of queues available
1869 *
1870 * If real_num_tx_queues is changed, the tc mappings may no longer be
1871 * valid. To resolve this, verify the tc mapping remains valid and, if
1872 * not, NULL the mapping. With no priorities mapping to this
1873 * offset/count pair it will no longer be used. In the worst case, TC0
1874 * is invalid and nothing can be done, so priority mappings are disabled.
1875 * It is expected that drivers will fix this mapping if they can before
1876 * calling netif_set_real_num_tx_queues.
1877 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001878static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001879{
1880 int i;
1881 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1882
1883 /* If TC0 is invalidated disable TC mapping */
1884 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001885 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001886 dev->num_tc = 0;
1887 return;
1888 }
1889
1890 /* Invalidated prio to tc mappings set to TC0 */
1891 for (i = 1; i < TC_BITMASK + 1; i++) {
1892 int q = netdev_get_prio_tc_map(dev, i);
1893
1894 tc = &dev->tc_to_txq[q];
1895 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001896 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1897 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001898 netdev_set_prio_tc_map(dev, i, 0);
1899 }
1900 }
1901}
1902
Alexander Duyck537c00d2013-01-10 08:57:02 +00001903#ifdef CONFIG_XPS
1904static DEFINE_MUTEX(xps_map_mutex);
1905#define xmap_dereference(P) \
1906 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1907
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001908static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1909 int cpu, u16 index)
1910{
1911 struct xps_map *map = NULL;
1912 int pos;
1913
1914 if (dev_maps)
1915 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1916
1917 for (pos = 0; map && pos < map->len; pos++) {
1918 if (map->queues[pos] == index) {
1919 if (map->len > 1) {
1920 map->queues[pos] = map->queues[--map->len];
1921 } else {
1922 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1923 kfree_rcu(map, rcu);
1924 map = NULL;
1925 }
1926 break;
1927 }
1928 }
1929
1930 return map;
1931}
1932
Alexander Duyck024e9672013-01-10 08:57:46 +00001933static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001934{
1935 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00001936 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001937 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001938
1939 mutex_lock(&xps_map_mutex);
1940 dev_maps = xmap_dereference(dev->xps_maps);
1941
1942 if (!dev_maps)
1943 goto out_no_maps;
1944
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001945 for_each_possible_cpu(cpu) {
Alexander Duyck024e9672013-01-10 08:57:46 +00001946 for (i = index; i < dev->num_tx_queues; i++) {
1947 if (!remove_xps_queue(dev_maps, cpu, i))
1948 break;
1949 }
1950 if (i == dev->num_tx_queues)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001951 active = true;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001952 }
1953
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001954 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00001955 RCU_INIT_POINTER(dev->xps_maps, NULL);
1956 kfree_rcu(dev_maps, rcu);
1957 }
1958
Alexander Duyck024e9672013-01-10 08:57:46 +00001959 for (i = index; i < dev->num_tx_queues; i++)
1960 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1961 NUMA_NO_NODE);
1962
Alexander Duyck537c00d2013-01-10 08:57:02 +00001963out_no_maps:
1964 mutex_unlock(&xps_map_mutex);
1965}
1966
Alexander Duyck01c5f862013-01-10 08:57:35 +00001967static struct xps_map *expand_xps_map(struct xps_map *map,
1968 int cpu, u16 index)
1969{
1970 struct xps_map *new_map;
1971 int alloc_len = XPS_MIN_MAP_ALLOC;
1972 int i, pos;
1973
1974 for (pos = 0; map && pos < map->len; pos++) {
1975 if (map->queues[pos] != index)
1976 continue;
1977 return map;
1978 }
1979
1980 /* Need to add queue to this CPU's existing map */
1981 if (map) {
1982 if (pos < map->alloc_len)
1983 return map;
1984
1985 alloc_len = map->alloc_len * 2;
1986 }
1987
1988 /* Need to allocate new map to store queue on this CPU's map */
1989 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1990 cpu_to_node(cpu));
1991 if (!new_map)
1992 return NULL;
1993
1994 for (i = 0; i < pos; i++)
1995 new_map->queues[i] = map->queues[i];
1996 new_map->alloc_len = alloc_len;
1997 new_map->len = pos;
1998
1999 return new_map;
2000}
2001
Michael S. Tsirkin35735402013-10-02 09:14:06 +03002002int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2003 u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00002004{
Alexander Duyck01c5f862013-01-10 08:57:35 +00002005 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002006 struct xps_map *map, *new_map;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002007 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002008 int cpu, numa_node_id = -2;
2009 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002010
2011 mutex_lock(&xps_map_mutex);
2012
2013 dev_maps = xmap_dereference(dev->xps_maps);
2014
Alexander Duyck01c5f862013-01-10 08:57:35 +00002015 /* allocate memory for queue storage */
2016 for_each_online_cpu(cpu) {
2017 if (!cpumask_test_cpu(cpu, mask))
2018 continue;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002019
Alexander Duyck01c5f862013-01-10 08:57:35 +00002020 if (!new_dev_maps)
2021 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002022 if (!new_dev_maps) {
2023 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002024 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002025 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002026
2027 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2028 NULL;
2029
2030 map = expand_xps_map(map, cpu, index);
2031 if (!map)
2032 goto error;
2033
2034 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2035 }
2036
2037 if (!new_dev_maps)
2038 goto out_no_new_maps;
2039
2040 for_each_possible_cpu(cpu) {
2041 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2042 /* add queue to CPU maps */
2043 int pos = 0;
2044
2045 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2046 while ((pos < map->len) && (map->queues[pos] != index))
2047 pos++;
2048
2049 if (pos == map->len)
2050 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002051#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00002052 if (numa_node_id == -2)
2053 numa_node_id = cpu_to_node(cpu);
2054 else if (numa_node_id != cpu_to_node(cpu))
2055 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002056#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00002057 } else if (dev_maps) {
2058 /* fill in the new device map from the old device map */
2059 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2060 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002061 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002062
Alexander Duyck537c00d2013-01-10 08:57:02 +00002063 }
2064
Alexander Duyck01c5f862013-01-10 08:57:35 +00002065 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2066
Alexander Duyck537c00d2013-01-10 08:57:02 +00002067 /* Cleanup old maps */
Alexander Duyck01c5f862013-01-10 08:57:35 +00002068 if (dev_maps) {
2069 for_each_possible_cpu(cpu) {
2070 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2071 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2072 if (map && map != new_map)
2073 kfree_rcu(map, rcu);
2074 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002075
Alexander Duyck537c00d2013-01-10 08:57:02 +00002076 kfree_rcu(dev_maps, rcu);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002077 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002078
Alexander Duyck01c5f862013-01-10 08:57:35 +00002079 dev_maps = new_dev_maps;
2080 active = true;
2081
2082out_no_new_maps:
2083 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00002084 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2085 (numa_node_id >= 0) ? numa_node_id :
2086 NUMA_NO_NODE);
2087
Alexander Duyck01c5f862013-01-10 08:57:35 +00002088 if (!dev_maps)
2089 goto out_no_maps;
2090
2091 /* removes queue from unused CPUs */
2092 for_each_possible_cpu(cpu) {
2093 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2094 continue;
2095
2096 if (remove_xps_queue(dev_maps, cpu, index))
2097 active = true;
2098 }
2099
2100 /* free map if not active */
2101 if (!active) {
2102 RCU_INIT_POINTER(dev->xps_maps, NULL);
2103 kfree_rcu(dev_maps, rcu);
2104 }
2105
2106out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00002107 mutex_unlock(&xps_map_mutex);
2108
2109 return 0;
2110error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002111 /* remove any maps that we added */
2112 for_each_possible_cpu(cpu) {
2113 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2114 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2115 NULL;
2116 if (new_map && new_map != map)
2117 kfree(new_map);
2118 }
2119
Alexander Duyck537c00d2013-01-10 08:57:02 +00002120 mutex_unlock(&xps_map_mutex);
2121
Alexander Duyck537c00d2013-01-10 08:57:02 +00002122 kfree(new_dev_maps);
2123 return -ENOMEM;
2124}
2125EXPORT_SYMBOL(netif_set_xps_queue);
2126
2127#endif
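/* Illustrative sketch (queue index and CPU choice are arbitrary): a
 * multiqueue driver could steer transmissions from CPU 0 to tx queue 0 by
 *
 *	cpumask_var_t mask;
 *
 *	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(0, mask);
 *		netif_set_xps_queue(dev, mask, 0);
 *		free_cpumask_var(mask);
 *	}
 *
 * The same mapping can also be set from user space through the per-queue
 * xps_cpus sysfs attribute.
 */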
John Fastabendf0796d52010-07-01 13:21:57 +00002128/*
2129 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2130 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2131 */
Tom Herberte6484932010-10-18 18:04:39 +00002132int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002133{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002134 int rc;
2135
Tom Herberte6484932010-10-18 18:04:39 +00002136 if (txq < 1 || txq > dev->num_tx_queues)
2137 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002138
Ben Hutchings5c565802011-02-15 19:39:21 +00002139 if (dev->reg_state == NETREG_REGISTERED ||
2140 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002141 ASSERT_RTNL();
2142
Tom Herbert1d24eb42010-11-21 13:17:27 +00002143 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2144 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002145 if (rc)
2146 return rc;
2147
John Fastabend4f57c082011-01-17 08:06:04 +00002148 if (dev->num_tc)
2149 netif_setup_tc(dev, txq);
2150
Alexander Duyck024e9672013-01-10 08:57:46 +00002151 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002152 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002153#ifdef CONFIG_XPS
2154 netif_reset_xps_queues_gt(dev, txq);
2155#endif
2156 }
John Fastabendf0796d52010-07-01 13:21:57 +00002157 }
Tom Herberte6484932010-10-18 18:04:39 +00002158
2159 dev->real_num_tx_queues = txq;
2160 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002161}
2162EXPORT_SYMBOL(netif_set_real_num_tx_queues);
Denis Vlasenko56079432006-03-29 15:57:29 -08002163
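/* Illustrative sketch (queue counts are arbitrary): a driver that
 * allocated its netdev with alloc_etherdev_mq(sizeof(*priv), 8) but only
 * brought up four rings could shrink the active set with
 *
 *	netif_set_real_num_tx_queues(dev, 4);
 *
 * holding RTNL if the device is already registered.
 */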
Michael Daltona953be52014-01-16 22:23:28 -08002164#ifdef CONFIG_SYSFS
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002165/**
2166 * netif_set_real_num_rx_queues - set actual number of RX queues used
2167 * @dev: Network device
2168 * @rxq: Actual number of RX queues
2169 *
2170 * This must be called either with the rtnl_lock held or before
2171 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002172 * negative error code. If called before registration, it always
2173 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002174 */
2175int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2176{
2177 int rc;
2178
Tom Herbertbd25fa72010-10-18 18:00:16 +00002179 if (rxq < 1 || rxq > dev->num_rx_queues)
2180 return -EINVAL;
2181
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002182 if (dev->reg_state == NETREG_REGISTERED) {
2183 ASSERT_RTNL();
2184
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002185 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2186 rxq);
2187 if (rc)
2188 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002189 }
2190
2191 dev->real_num_rx_queues = rxq;
2192 return 0;
2193}
2194EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2195#endif
2196
Ben Hutchings2c530402012-07-10 10:55:09 +00002197/**
2198 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002199 *
2200 * This routine should set an upper limit on the number of RSS queues
2201 * used by default by multiqueue devices.
2202 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002203int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002204{
2205 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2206}
2207EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2208
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002209static inline void __netif_reschedule(struct Qdisc *q)
2210{
2211 struct softnet_data *sd;
2212 unsigned long flags;
2213
2214 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05002215 sd = this_cpu_ptr(&softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002216 q->next_sched = NULL;
2217 *sd->output_queue_tailp = q;
2218 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002219 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2220 local_irq_restore(flags);
2221}
2222
David S. Miller37437bb2008-07-16 02:15:04 -07002223void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002224{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002225 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2226 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002227}
2228EXPORT_SYMBOL(__netif_schedule);
2229
Eric Dumazete6247022013-12-05 04:45:08 -08002230struct dev_kfree_skb_cb {
2231 enum skb_free_reason reason;
2232};
2233
2234static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002235{
Eric Dumazete6247022013-12-05 04:45:08 -08002236 return (struct dev_kfree_skb_cb *)skb->cb;
Denis Vlasenko56079432006-03-29 15:57:29 -08002237}
Denis Vlasenko56079432006-03-29 15:57:29 -08002238
John Fastabend46e5da42014-09-12 20:04:52 -07002239void netif_schedule_queue(struct netdev_queue *txq)
2240{
2241 rcu_read_lock();
2242 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2243 struct Qdisc *q = rcu_dereference(txq->qdisc);
2244
2245 __netif_schedule(q);
2246 }
2247 rcu_read_unlock();
2248}
2249EXPORT_SYMBOL(netif_schedule_queue);
2250
2251/**
2252 * netif_wake_subqueue - allow sending packets on subqueue
2253 * @dev: network device
2254 * @queue_index: sub queue index
2255 *
2256 * Resume individual transmit queue of a device with multiple transmit queues.
2257 */
2258void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2259{
2260 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2261
2262 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2263 struct Qdisc *q;
2264
2265 rcu_read_lock();
2266 q = rcu_dereference(txq->qdisc);
2267 __netif_schedule(q);
2268 rcu_read_unlock();
2269 }
2270}
2271EXPORT_SYMBOL(netif_wake_subqueue);
2272
2273void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2274{
2275 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2276 struct Qdisc *q;
2277
2278 rcu_read_lock();
2279 q = rcu_dereference(dev_queue->qdisc);
2280 __netif_schedule(q);
2281 rcu_read_unlock();
2282 }
2283}
2284EXPORT_SYMBOL(netif_tx_wake_queue);
2285
Eric Dumazete6247022013-12-05 04:45:08 -08002286void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2287{
2288 unsigned long flags;
2289
2290 if (likely(atomic_read(&skb->users) == 1)) {
2291 smp_rmb();
2292 atomic_set(&skb->users, 0);
2293 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2294 return;
2295 }
2296 get_kfree_skb_cb(skb)->reason = reason;
2297 local_irq_save(flags);
2298 skb->next = __this_cpu_read(softnet_data.completion_queue);
2299 __this_cpu_write(softnet_data.completion_queue, skb);
2300 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2301 local_irq_restore(flags);
2302}
2303EXPORT_SYMBOL(__dev_kfree_skb_irq);
2304
2305void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
Denis Vlasenko56079432006-03-29 15:57:29 -08002306{
2307 if (in_irq() || irqs_disabled())
Eric Dumazete6247022013-12-05 04:45:08 -08002308 __dev_kfree_skb_irq(skb, reason);
Denis Vlasenko56079432006-03-29 15:57:29 -08002309 else
2310 dev_kfree_skb(skb);
2311}
Eric Dumazete6247022013-12-05 04:45:08 -08002312EXPORT_SYMBOL(__dev_kfree_skb_any);
Denis Vlasenko56079432006-03-29 15:57:29 -08002313
2314
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002315/**
2316 * netif_device_detach - mark device as removed
2317 * @dev: network device
2318 *
2319 *	Mark the device as removed from the system and therefore no longer available.
2320 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002321void netif_device_detach(struct net_device *dev)
2322{
2323 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2324 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002325 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002326 }
2327}
2328EXPORT_SYMBOL(netif_device_detach);
2329
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002330/**
2331 * netif_device_attach - mark device as attached
2332 * @dev: network device
2333 *
2334 *	Mark the device as attached to the system and restart it if needed.
2335 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002336void netif_device_attach(struct net_device *dev)
2337{
2338 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2339 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002340 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002341 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002342 }
2343}
2344EXPORT_SYMBOL(netif_device_attach);
2345
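/* Illustrative sketch: drivers commonly pair the two calls around a power
 * transition, e.g. in their suspend/resume handlers:
 *
 *	suspend:	netif_device_detach(dev);
 *	resume:		netif_device_attach(dev);
 */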
Ben Hutchings36c92472012-01-17 07:57:56 +00002346static void skb_warn_bad_offload(const struct sk_buff *skb)
2347{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002348 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002349 struct net_device *dev = skb->dev;
2350 const char *driver = "";
2351
Ben Greearc846ad92013-04-19 10:45:52 +00002352 if (!net_ratelimit())
2353 return;
2354
Ben Hutchings36c92472012-01-17 07:57:56 +00002355 if (dev && dev->dev.parent)
2356 driver = dev_driver_string(dev->dev.parent);
2357
2358 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2359 "gso_type=%d ip_summed=%d\n",
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002360 driver, dev ? &dev->features : &null_features,
2361 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002362 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2363 skb_shinfo(skb)->gso_type, skb->ip_summed);
2364}
2365
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366/*
2367 * Invalidate hardware checksum when packet is to be mangled, and
2368 * complete checksum manually on outgoing path.
2369 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002370int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371{
Al Virod3bc23e2006-11-14 21:24:49 -08002372 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002373 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374
Patrick McHardy84fa7932006-08-29 16:44:56 -07002375 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002376 goto out_set_summed;
2377
2378 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002379 skb_warn_bad_offload(skb);
2380 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 }
2382
Eric Dumazetcef401d2013-01-25 20:34:37 +00002383 /* Before computing a checksum, we should make sure no frag could
2384 * be modified by an external entity : checksum could be wrong.
2385 */
2386 if (skb_has_shared_frag(skb)) {
2387 ret = __skb_linearize(skb);
2388 if (ret)
2389 goto out;
2390 }
2391
Michał Mirosław55508d62010-12-14 15:24:08 +00002392 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002393 BUG_ON(offset >= skb_headlen(skb));
2394 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2395
2396 offset += skb->csum_offset;
2397 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2398
2399 if (skb_cloned(skb) &&
2400 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2402 if (ret)
2403 goto out;
2404 }
2405
Herbert Xua0308472007-10-15 01:47:15 -07002406 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002407out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002409out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 return ret;
2411}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002412EXPORT_SYMBOL(skb_checksum_help);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413
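/* Illustrative sketch: a transmit path that must touch the payload of a
 * CHECKSUM_PARTIAL skb it cannot offload would resolve the checksum in
 * software first, e.g.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 *
 * where the drop label is the caller's own error path.
 */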
Vlad Yasevich53d64712014-03-27 17:26:18 -04002414__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002415{
2416 __be16 type = skb->protocol;
2417
Pravin B Shelar19acc322013-05-07 20:41:07 +00002418 /* Tunnel gso handlers can set protocol to ethernet. */
2419 if (type == htons(ETH_P_TEB)) {
2420 struct ethhdr *eth;
2421
2422 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2423 return 0;
2424
2425 eth = (struct ethhdr *)skb_mac_header(skb);
2426 type = eth->h_proto;
2427 }
2428
Toshiaki Makitad4bcef32015-01-29 20:37:07 +09002429 return __vlan_get_protocol(skb, type, depth);
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002430}
2431
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002432/**
2433 * skb_mac_gso_segment - mac layer segmentation handler.
2434 * @skb: buffer to segment
2435 * @features: features for the output path (see dev->features)
2436 */
2437struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2438 netdev_features_t features)
2439{
2440 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2441 struct packet_offload *ptype;
Vlad Yasevich53d64712014-03-27 17:26:18 -04002442 int vlan_depth = skb->mac_len;
2443 __be16 type = skb_network_protocol(skb, &vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002444
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002445 if (unlikely(!type))
2446 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002447
Vlad Yasevich53d64712014-03-27 17:26:18 -04002448 __skb_pull(skb, vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002449
2450 rcu_read_lock();
2451 list_for_each_entry_rcu(ptype, &offload_base, list) {
2452 if (ptype->type == type && ptype->callbacks.gso_segment) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002453 segs = ptype->callbacks.gso_segment(skb, features);
2454 break;
2455 }
2456 }
2457 rcu_read_unlock();
2458
2459 __skb_push(skb, skb->data - skb_mac_header(skb));
2460
2461 return segs;
2462}
2463EXPORT_SYMBOL(skb_mac_gso_segment);
2464
2465
Cong Wang12b00042013-02-05 16:36:38 +00002466/* openvswitch calls this on rx path, so we need a different check.
2467 */
2468static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2469{
2470 if (tx_path)
2471 return skb->ip_summed != CHECKSUM_PARTIAL;
2472 else
2473 return skb->ip_summed == CHECKSUM_NONE;
2474}
2475
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002476/**
Cong Wang12b00042013-02-05 16:36:38 +00002477 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002478 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002479 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002480 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002481 *
2482 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002483 *
2484 * It may return NULL if the skb requires no segmentation. This is
2485 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002486 */
Cong Wang12b00042013-02-05 16:36:38 +00002487struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2488 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002489{
Cong Wang12b00042013-02-05 16:36:38 +00002490 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002491 int err;
2492
Ben Hutchings36c92472012-01-17 07:57:56 +00002493 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002494
françois romieua40e0a62014-07-15 23:55:35 +02002495 err = skb_cow_head(skb, 0);
2496 if (err < 0)
Herbert Xua430a432006-07-08 13:34:56 -07002497 return ERR_PTR(err);
2498 }
2499
Pravin B Shelar68c33162013-02-14 14:02:41 +00002500 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Eric Dumazet3347c962013-10-19 11:42:56 -07002501 SKB_GSO_CB(skb)->encap_level = 0;
2502
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002503 skb_reset_mac_header(skb);
2504 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002505
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002506 return skb_mac_gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002507}
Cong Wang12b00042013-02-05 16:36:38 +00002508EXPORT_SYMBOL(__skb_gso_segment);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002509
Herbert Xufb286bb2005-11-10 13:01:24 -08002510/* Take action when hardware reception checksum errors are detected. */
2511#ifdef CONFIG_BUG
2512void netdev_rx_csum_fault(struct net_device *dev)
2513{
2514 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002515 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002516 dump_stack();
2517 }
2518}
2519EXPORT_SYMBOL(netdev_rx_csum_fault);
2520#endif
2521
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522/* Actually, we should eliminate this check as soon as we know that:
2523 * 1. An IOMMU is present and allows mapping all the memory.
2524 * 2. No high memory really exists on this machine.
2525 */
2526
Florian Westphalc1e756b2014-05-05 15:00:44 +02002527static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002529#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002531 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002532 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2533 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2534 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002535 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002536 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002537 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002539 if (PCI_DMA_BUS_IS_PHYS) {
2540 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541
Eric Dumazet9092c652010-04-02 13:34:49 -07002542 if (!pdev)
2543 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002544 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002545 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2546 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002547 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2548 return 1;
2549 }
2550 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002551#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 return 0;
2553}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554
Simon Horman3b392dd2014-06-04 08:53:17 +09002555/* If MPLS offload request, verify we are testing hardware MPLS features
2556 * instead of standard features for the netdev.
2557 */
Pravin B Shelard0edc7b2014-12-23 16:20:11 -08002558#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
Simon Horman3b392dd2014-06-04 08:53:17 +09002559static netdev_features_t net_mpls_features(struct sk_buff *skb,
2560 netdev_features_t features,
2561 __be16 type)
2562{
Simon Horman25cd9ba2014-10-06 05:05:13 -07002563 if (eth_p_mpls(type))
Simon Horman3b392dd2014-06-04 08:53:17 +09002564 features &= skb->dev->mpls_features;
2565
2566 return features;
2567}
2568#else
2569static netdev_features_t net_mpls_features(struct sk_buff *skb,
2570 netdev_features_t features,
2571 __be16 type)
2572{
2573 return features;
2574}
2575#endif
2576
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002577static netdev_features_t harmonize_features(struct sk_buff *skb,
Florian Westphalc1e756b2014-05-05 15:00:44 +02002578 netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002579{
Vlad Yasevich53d64712014-03-27 17:26:18 -04002580 int tmp;
Simon Horman3b392dd2014-06-04 08:53:17 +09002581 __be16 type;
2582
2583 type = skb_network_protocol(skb, &tmp);
2584 features = net_mpls_features(skb, features, type);
Vlad Yasevich53d64712014-03-27 17:26:18 -04002585
Ed Cashinc0d680e2012-09-19 15:49:00 +00002586 if (skb->ip_summed != CHECKSUM_NONE &&
Simon Horman3b392dd2014-06-04 08:53:17 +09002587 !can_checksum_protocol(features, type)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002588 features &= ~NETIF_F_ALL_CSUM;
Florian Westphalc1e756b2014-05-05 15:00:44 +02002589 } else if (illegal_highdma(skb->dev, skb)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002590 features &= ~NETIF_F_SG;
2591 }
2592
2593 return features;
2594}
2595
Toshiaki Makitae38f3022015-03-27 14:31:13 +09002596netdev_features_t passthru_features_check(struct sk_buff *skb,
2597 struct net_device *dev,
2598 netdev_features_t features)
2599{
2600 return features;
2601}
2602EXPORT_SYMBOL(passthru_features_check);
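
/* Illustrative sketch (hypothetical caller code, not from dev.c itself):
 * a pass-through virtual device can wire this helper straight into its
 * ops so no per-skb feature masking is applied.  example_start_xmit and
 * example_netdev_ops are made-up names.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* hypothetical transmit routine */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit		= example_start_xmit,
	.ndo_features_check	= passthru_features_check,
};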
2603
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002604static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2605 struct net_device *dev,
2606 netdev_features_t features)
2607{
2608 return vlan_features_check(skb, features);
2609}
2610
Florian Westphalc1e756b2014-05-05 15:00:44 +02002611netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002612{
Jesse Gross5f352272014-12-23 22:37:26 -08002613 struct net_device *dev = skb->dev;
Eric Dumazetfcbeb972014-10-05 10:11:27 -07002614 netdev_features_t features = dev->features;
2615 u16 gso_segs = skb_shinfo(skb)->gso_segs;
Jesse Gross58e998c2010-10-29 12:14:55 +00002616
Eric Dumazetfcbeb972014-10-05 10:11:27 -07002617 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
Ben Hutchings30b678d2012-07-30 15:57:00 +00002618 features &= ~NETIF_F_GSO_MASK;
2619
Jesse Gross5f352272014-12-23 22:37:26 -08002620 /* If encapsulation offload request, verify we are testing
2621 * hardware encapsulation features instead of standard
2622 * features for the netdev
2623 */
2624 if (skb->encapsulation)
2625 features &= dev->hw_enc_features;
2626
Toshiaki Makitaf5a7fb82015-03-27 14:31:11 +09002627 if (skb_vlan_tagged(skb))
2628 features = netdev_intersect_features(features,
2629 dev->vlan_features |
2630 NETIF_F_HW_VLAN_CTAG_TX |
2631 NETIF_F_HW_VLAN_STAG_TX);
Jesse Gross58e998c2010-10-29 12:14:55 +00002632
Jesse Gross5f352272014-12-23 22:37:26 -08002633 if (dev->netdev_ops->ndo_features_check)
2634 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2635 features);
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002636 else
2637 features &= dflt_features_check(skb, dev, features);
Jesse Gross5f352272014-12-23 22:37:26 -08002638
Florian Westphalc1e756b2014-05-05 15:00:44 +02002639 return harmonize_features(skb, features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002640}
Florian Westphalc1e756b2014-05-05 15:00:44 +02002641EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002642
David S. Miller2ea25512014-08-29 21:10:01 -07002643static int xmit_one(struct sk_buff *skb, struct net_device *dev,
David S. Miller95f6b3d2014-08-29 21:57:30 -07002644 struct netdev_queue *txq, bool more)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002645{
David S. Miller2ea25512014-08-29 21:10:01 -07002646 unsigned int len;
2647 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08002648
Salam Noureddine7866a622015-01-27 11:35:48 -08002649 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
David S. Miller2ea25512014-08-29 21:10:01 -07002650 dev_queue_xmit_nit(skb, dev);
Jesse Grossfc741212011-01-09 06:23:32 +00002651
David S. Miller2ea25512014-08-29 21:10:01 -07002652 len = skb->len;
2653 trace_net_dev_start_xmit(skb, dev);
David S. Miller95f6b3d2014-08-29 21:57:30 -07002654 rc = netdev_start_xmit(skb, dev, txq, more);
David S. Miller2ea25512014-08-29 21:10:01 -07002655 trace_net_dev_xmit(skb, rc, dev, len);
Eric Dumazetadf30902009-06-02 05:19:30 +00002656
Patrick McHardy572a9d72009-11-10 06:14:14 +00002657 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002658}
David S. Miller2ea25512014-08-29 21:10:01 -07002659
David S. Miller8dcda222014-09-01 15:06:40 -07002660struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2661 struct netdev_queue *txq, int *ret)
David S. Miller7f2e8702014-08-29 21:19:14 -07002662{
2663 struct sk_buff *skb = first;
2664 int rc = NETDEV_TX_OK;
2665
2666 while (skb) {
2667 struct sk_buff *next = skb->next;
2668
2669 skb->next = NULL;
David S. Miller95f6b3d2014-08-29 21:57:30 -07002670 rc = xmit_one(skb, dev, txq, next != NULL);
David S. Miller7f2e8702014-08-29 21:19:14 -07002671 if (unlikely(!dev_xmit_complete(rc))) {
2672 skb->next = next;
2673 goto out;
2674 }
2675
2676 skb = next;
2677 if (netif_xmit_stopped(txq) && skb) {
2678 rc = NETDEV_TX_BUSY;
2679 break;
2680 }
2681 }
2682
2683out:
2684 *ret = rc;
2685 return skb;
2686}
2687
Eric Dumazet1ff0dc92014-10-06 11:26:27 -07002688static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2689 netdev_features_t features)
David S. Millereae3f882014-08-30 15:17:13 -07002690{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002691 if (skb_vlan_tag_present(skb) &&
Jiri Pirko59682502014-11-19 14:04:59 +01002692 !vlan_hw_offload_capable(features, skb->vlan_proto))
2693 skb = __vlan_hwaccel_push_inside(skb);
David S. Millereae3f882014-08-30 15:17:13 -07002694 return skb;
2695}
2696
Eric Dumazet55a93b32014-10-03 15:31:07 -07002697static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
David S. Millereae3f882014-08-30 15:17:13 -07002698{
2699 netdev_features_t features;
2700
2701 if (skb->next)
2702 return skb;
2703
David S. Millereae3f882014-08-30 15:17:13 -07002704 features = netif_skb_features(skb);
2705 skb = validate_xmit_vlan(skb, features);
2706 if (unlikely(!skb))
2707 goto out_null;
2708
Johannes Berg8b86a612015-04-17 15:45:04 +02002709 if (netif_needs_gso(skb, features)) {
David S. Millerce937182014-08-30 19:22:20 -07002710 struct sk_buff *segs;
2711
2712 segs = skb_gso_segment(skb, features);
Jason Wangcecda692014-09-19 16:04:38 +08002713 if (IS_ERR(segs)) {
Jason Wangaf6dabc2014-12-19 11:09:13 +08002714 goto out_kfree_skb;
Jason Wangcecda692014-09-19 16:04:38 +08002715 } else if (segs) {
2716 consume_skb(skb);
2717 skb = segs;
2718 }
David S. Millereae3f882014-08-30 15:17:13 -07002719 } else {
2720 if (skb_needs_linearize(skb, features) &&
2721 __skb_linearize(skb))
2722 goto out_kfree_skb;
2723
2724 /* If packet is not checksummed and device does not
2725 * support checksumming for this protocol, complete
2726 * checksumming here.
2727 */
2728 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2729 if (skb->encapsulation)
2730 skb_set_inner_transport_header(skb,
2731 skb_checksum_start_offset(skb));
2732 else
2733 skb_set_transport_header(skb,
2734 skb_checksum_start_offset(skb));
2735 if (!(features & NETIF_F_ALL_CSUM) &&
2736 skb_checksum_help(skb))
2737 goto out_kfree_skb;
2738 }
2739 }
2740
2741 return skb;
2742
2743out_kfree_skb:
2744 kfree_skb(skb);
2745out_null:
2746 return NULL;
2747}
2748
Eric Dumazet55a93b32014-10-03 15:31:07 -07002749struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2750{
2751 struct sk_buff *next, *head = NULL, *tail;
2752
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07002753 for (; skb != NULL; skb = next) {
Eric Dumazet55a93b32014-10-03 15:31:07 -07002754 next = skb->next;
2755 skb->next = NULL;
Eric Dumazet55a93b32014-10-03 15:31:07 -07002756
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07002757	/* in case skb won't be segmented, point to itself */
2758 skb->prev = skb;
2759
2760 skb = validate_xmit_skb(skb, dev);
2761 if (!skb)
2762 continue;
2763
2764 if (!head)
2765 head = skb;
2766 else
2767 tail->next = skb;
2768 /* If skb was segmented, skb->prev points to
2769 * the last segment. If not, it still contains skb.
2770 */
2771 tail = skb->prev;
Eric Dumazet55a93b32014-10-03 15:31:07 -07002772 }
2773 return head;
2774}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002775
Eric Dumazet1def9232013-01-10 12:36:42 +00002776static void qdisc_pkt_len_init(struct sk_buff *skb)
2777{
2778 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2779
2780 qdisc_skb_cb(skb)->pkt_len = skb->len;
2781
2782	/* To get a more precise estimate of the bytes sent on the wire,
2783	 * we add to pkt_len the header size of all segments
2784 */
2785 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08002786 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00002787 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00002788
Eric Dumazet757b8b12013-01-15 21:14:21 -08002789 /* mac layer + network layer */
2790 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2791
2792 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00002793 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2794 hdr_len += tcp_hdrlen(skb);
2795 else
2796 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00002797
2798 if (shinfo->gso_type & SKB_GSO_DODGY)
2799 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2800 shinfo->gso_size);
2801
2802 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00002803 }
2804}
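
/* Worked example (illustrative, not from the original source): for a TSO
 * packet carrying 10 segments of 1448 bytes of payload behind 66 bytes of
 * Ethernet + IPv4 + TCP headers, skb->len = 66 + 10 * 1448 = 14546 and
 * hdr_len = 66, so
 *
 *	pkt_len = 14546 + (10 - 1) * 66 = 15140
 *
 * which matches the 10 * (66 + 1448) = 15140 bytes actually put on the
 * wire, rather than the bare skb->len.
 */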
2805
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002806static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2807 struct net_device *dev,
2808 struct netdev_queue *txq)
2809{
2810 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002811 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002812 int rc;
2813
Eric Dumazet1def9232013-01-10 12:36:42 +00002814 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002815 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002816 /*
2817 * Heuristic to force contended enqueues to serialize on a
2818 * separate lock before trying to get qdisc main lock.
Ying Xue9bf2b8c2014-06-26 15:56:31 +08002819 * This permits __QDISC___STATE_RUNNING owner to get the lock more
2820 * often and dequeue packets faster.
Eric Dumazet79640a42010-06-02 05:09:29 -07002821 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002822 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002823 if (unlikely(contended))
2824 spin_lock(&q->busylock);
2825
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002826 spin_lock(root_lock);
2827 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2828 kfree_skb(skb);
2829 rc = NET_XMIT_DROP;
2830 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002831 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002832 /*
2833 * This is a work-conserving queue; there are no old skbs
2834 * waiting to be sent out; and the qdisc is not running -
2835 * xmit the skb directly.
2836 */
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002837
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002838 qdisc_bstats_update(q, skb);
2839
Eric Dumazet55a93b32014-10-03 15:31:07 -07002840 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
Eric Dumazet79640a42010-06-02 05:09:29 -07002841 if (unlikely(contended)) {
2842 spin_unlock(&q->busylock);
2843 contended = false;
2844 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002845 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002846 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002847 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002848
2849 rc = NET_XMIT_SUCCESS;
2850 } else {
Eric Dumazeta2da5702011-01-20 03:48:19 +00002851 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002852 if (qdisc_run_begin(q)) {
2853 if (unlikely(contended)) {
2854 spin_unlock(&q->busylock);
2855 contended = false;
2856 }
2857 __qdisc_run(q);
2858 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002859 }
2860 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002861 if (unlikely(contended))
2862 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002863 return rc;
2864}
2865
Daniel Borkmann86f85152013-12-29 17:27:11 +01002866#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Neil Horman5bc14212011-11-22 05:10:51 +00002867static void skb_update_prio(struct sk_buff *skb)
2868{
Igor Maravic6977a792011-11-25 07:44:54 +00002869 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002870
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002871 if (!skb->priority && skb->sk && map) {
2872 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2873
2874 if (prioidx < map->priomap_len)
2875 skb->priority = map->priomap[prioidx];
2876 }
Neil Horman5bc14212011-11-22 05:10:51 +00002877}
2878#else
2879#define skb_update_prio(skb)
2880#endif
2881
hannes@stressinduktion.orgf60e5992015-04-01 17:07:44 +02002882DEFINE_PER_CPU(int, xmit_recursion);
2883EXPORT_SYMBOL(xmit_recursion);
2884
David S. Miller11a766c2010-10-25 12:51:55 -07002885#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002886
Dave Jonesd29f7492008-07-22 14:09:06 -07002887/**
Michel Machado95603e22012-06-12 10:16:35 +00002888 * dev_loopback_xmit - loop back @skb
2889 * @skb: buffer to transmit
2890 */
David Miller7026b1d2015-04-05 22:19:04 -04002891int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
Michel Machado95603e22012-06-12 10:16:35 +00002892{
2893 skb_reset_mac_header(skb);
2894 __skb_pull(skb, skb_network_offset(skb));
2895 skb->pkt_type = PACKET_LOOPBACK;
2896 skb->ip_summed = CHECKSUM_UNNECESSARY;
2897 WARN_ON(!skb_dst(skb));
2898 skb_dst_force(skb);
2899 netif_rx_ni(skb);
2900 return 0;
2901}
2902EXPORT_SYMBOL(dev_loopback_xmit);
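
/* Illustrative sketch (hypothetical caller code, not from dev.c itself):
 * roughly how an output path can loop a local copy of a multicast frame
 * back to the stack, similar in spirit to the IPv4/IPv6 multicast output
 * code.  The skb is assumed to already carry a dst entry.
 */
static void example_loop_multicast_copy(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);

	if (copy)
		dev_loopback_xmit(sk, copy);	/* copy is consumed here */
}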
2903
2904/**
Jason Wang9d08dd32014-01-20 11:25:13 +08002905 * __dev_queue_xmit - transmit a buffer
Dave Jonesd29f7492008-07-22 14:09:06 -07002906 * @skb: buffer to transmit
Jason Wang9d08dd32014-01-20 11:25:13 +08002907 * @accel_priv: private data used for L2 forwarding offload
Dave Jonesd29f7492008-07-22 14:09:06 -07002908 *
2909 * Queue a buffer for transmission to a network device. The caller must
2910 * have set the device and priority and built the buffer before calling
2911 * this function. The function can be called from an interrupt.
2912 *
2913 * A negative errno code is returned on a failure. A success does not
2914 * guarantee the frame will be transmitted as it may be dropped due
2915 * to congestion or traffic shaping.
2916 *
2917 * -----------------------------------------------------------------------------------
2918 * I notice this method can also return errors from the queue disciplines,
2919 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2920 * be positive.
2921 *
2922 * Regardless of the return value, the skb is consumed, so it is currently
2923 * difficult to retry a send to this method. (You can bump the ref count
2924 * before sending to hold a reference for retry if you are careful.)
2925 *
2926 * When calling this method, interrupts MUST be enabled. This is because
2927 * the BH enable code must have IRQs enabled so that it will not deadlock.
2928 * --BLG
2929 */
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05302930static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931{
2932 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002933 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934 struct Qdisc *q;
2935 int rc = -ENOMEM;
2936
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00002937 skb_reset_mac_header(skb);
2938
Willem de Bruijne7fd2882014-08-04 22:11:48 -04002939 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
2940 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
2941
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002942 /* Disable soft irqs for various locks below. Also
2943 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002945 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946
Neil Horman5bc14212011-11-22 05:10:51 +00002947 skb_update_prio(skb);
2948
Eric Dumazet02875872014-10-05 18:38:35 -07002949 /* If device/qdisc don't need skb->dst, release it right now while
2950	 * it's hot in this cpu cache.
2951 */
2952 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2953 skb_dst_drop(skb);
2954 else
2955 skb_dst_force(skb);
2956
Jason Wangf663dd92014-01-10 16:18:26 +08002957 txq = netdev_pick_tx(dev, skb, accel_priv);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002958 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002959
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002961 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002963 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002965 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002966 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 }
2968
2969 /* The device has no queue. Common case for software devices:
2970	   loopback, all sorts of tunnels...
2971
Herbert Xu932ff272006-06-09 12:20:56 -07002972 Really, it is unlikely that netif_tx_lock protection is necessary
2973 here. (f.e. loopback and IP tunnels are clean ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 counters.)
2975	   However, it is possible that they rely on the protection
2976 made by us here.
2977
2978	   Check this and shoot the lock. It is not prone to deadlocks.
2979	   Either shoot the noqueue qdisc, it is even simpler 8)
2980 */
2981 if (dev->flags & IFF_UP) {
2982 int cpu = smp_processor_id(); /* ok because BHs are off */
2983
David S. Millerc773e842008-07-08 23:13:53 -07002984 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985
Eric Dumazet745e20f2010-09-29 13:23:09 -07002986 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2987 goto recursion_alert;
2988
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02002989 skb = validate_xmit_skb(skb, dev);
2990 if (!skb)
2991 goto drop;
2992
David S. Millerc773e842008-07-08 23:13:53 -07002993 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994
Tom Herbert734664982011-11-28 16:32:44 +00002995 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002996 __this_cpu_inc(xmit_recursion);
David S. Millerce937182014-08-30 19:22:20 -07002997 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002998 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002999 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07003000 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001 goto out;
3002 }
3003 }
David S. Millerc773e842008-07-08 23:13:53 -07003004 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00003005 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3006 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003007 } else {
3008 /* Recursion is detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07003009 * unfortunately
3010 */
3011recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00003012 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3013 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014 }
3015 }
3016
3017 rc = -ENETDOWN;
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003018drop:
Herbert Xud4828d82006-06-22 02:28:18 -07003019 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020
Eric Dumazet015f0682014-03-27 08:45:56 -07003021 atomic_long_inc(&dev->tx_dropped);
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02003022 kfree_skb_list(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003023 return rc;
3024out:
Herbert Xud4828d82006-06-22 02:28:18 -07003025 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026 return rc;
3027}
Jason Wangf663dd92014-01-10 16:18:26 +08003028
David Miller7026b1d2015-04-05 22:19:04 -04003029int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
Jason Wangf663dd92014-01-10 16:18:26 +08003030{
3031 return __dev_queue_xmit(skb, NULL);
3032}
David Miller7026b1d2015-04-05 22:19:04 -04003033EXPORT_SYMBOL(dev_queue_xmit_sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034
Jason Wangf663dd92014-01-10 16:18:26 +08003035int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3036{
3037 return __dev_queue_xmit(skb, accel_priv);
3038}
3039EXPORT_SYMBOL(dev_queue_xmit_accel);
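
/* Illustrative sketch (hypothetical caller code, not from dev.c itself):
 * transmitting a preconstructed Ethernet frame through the queueing
 * layer.  It assumes the dev_queue_xmit() wrapper around
 * dev_queue_xmit_sk() is visible to the caller and that @frame starts
 * with an Ethernet header; error handling is reduced to the essentials.
 */
static int example_send_frame(struct net_device *dev,
			      const void *frame, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + len,
					GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), frame, len);
	skb->dev = dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* The skb is consumed regardless of the return value. */
	return dev_queue_xmit(skb);
}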
3040
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041
3042/*=======================================================================
3043 Receiver routines
3044 =======================================================================*/
3045
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003046int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00003047EXPORT_SYMBOL(netdev_max_backlog);
3048
Eric Dumazet3b098e22010-05-15 23:57:10 -07003049int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003050int netdev_budget __read_mostly = 300;
3051int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003053/* Called with irq disabled */
3054static inline void ____napi_schedule(struct softnet_data *sd,
3055 struct napi_struct *napi)
3056{
3057 list_add_tail(&napi->poll_list, &sd->poll_list);
3058 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3059}
3060
Eric Dumazetdf334542010-03-24 19:13:54 +00003061#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07003062
3063/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003064struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07003065EXPORT_SYMBOL(rps_sock_flow_table);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003066u32 rps_cpu_mask __read_mostly;
3067EXPORT_SYMBOL(rps_cpu_mask);
Tom Herbertfec5e652010-04-16 16:01:27 -07003068
Ingo Molnarc5905af2012-02-24 08:31:31 +01003069struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00003070
Ben Hutchingsc4454772011-01-19 11:03:53 +00003071static struct rps_dev_flow *
3072set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3073 struct rps_dev_flow *rflow, u16 next_cpu)
3074{
Eric Dumazeta31196b2015-04-25 09:35:24 -07003075 if (next_cpu < nr_cpu_ids) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00003076#ifdef CONFIG_RFS_ACCEL
3077 struct netdev_rx_queue *rxqueue;
3078 struct rps_dev_flow_table *flow_table;
3079 struct rps_dev_flow *old_rflow;
3080 u32 flow_id;
3081 u16 rxq_index;
3082 int rc;
3083
3084 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00003085 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3086 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00003087 goto out;
3088 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3089 if (rxq_index == skb_get_rx_queue(skb))
3090 goto out;
3091
3092 rxqueue = dev->_rx + rxq_index;
3093 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3094 if (!flow_table)
3095 goto out;
Tom Herbert61b905d2014-03-24 15:34:47 -07003096 flow_id = skb_get_hash(skb) & flow_table->mask;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003097 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3098 rxq_index, flow_id);
3099 if (rc < 0)
3100 goto out;
3101 old_rflow = rflow;
3102 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00003103 rflow->filter = rc;
3104 if (old_rflow->filter == rflow->filter)
3105 old_rflow->filter = RPS_NO_FILTER;
3106 out:
3107#endif
3108 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00003109 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003110 }
3111
Ben Hutchings09994d12011-10-03 04:42:46 +00003112 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003113 return rflow;
3114}
3115
Tom Herbert0a9627f2010-03-16 08:03:29 +00003116/*
3117 * get_rps_cpu is called from netif_receive_skb and returns the target
3118 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003119 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00003120 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003121static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3122 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003123{
Eric Dumazet567e4b72015-02-06 12:59:01 -08003124 const struct rps_sock_flow_table *sock_flow_table;
3125 struct netdev_rx_queue *rxqueue = dev->_rx;
Tom Herbertfec5e652010-04-16 16:01:27 -07003126 struct rps_dev_flow_table *flow_table;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003127 struct rps_map *map;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003128 int cpu = -1;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003129 u32 tcpu;
Tom Herbert61b905d2014-03-24 15:34:47 -07003130 u32 hash;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003131
Tom Herbert0a9627f2010-03-16 08:03:29 +00003132 if (skb_rx_queue_recorded(skb)) {
3133 u16 index = skb_get_rx_queue(skb);
Eric Dumazet567e4b72015-02-06 12:59:01 -08003134
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003135 if (unlikely(index >= dev->real_num_rx_queues)) {
3136 WARN_ONCE(dev->real_num_rx_queues > 1,
3137 "%s received packet on queue %u, but number "
3138 "of RX queues is %u\n",
3139 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003140 goto done;
3141 }
Eric Dumazet567e4b72015-02-06 12:59:01 -08003142 rxqueue += index;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003143 }
3144
Eric Dumazet567e4b72015-02-06 12:59:01 -08003145 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3146
3147 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3148 map = rcu_dereference(rxqueue->rps_map);
3149 if (!flow_table && !map)
3150 goto done;
3151
Changli Gao2d47b452010-08-17 19:00:56 +00003152 skb_reset_network_header(skb);
Tom Herbert61b905d2014-03-24 15:34:47 -07003153 hash = skb_get_hash(skb);
3154 if (!hash)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003155 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003156
Tom Herbertfec5e652010-04-16 16:01:27 -07003157 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3158 if (flow_table && sock_flow_table) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003159 struct rps_dev_flow *rflow;
Eric Dumazet567e4b72015-02-06 12:59:01 -08003160 u32 next_cpu;
3161 u32 ident;
Tom Herbertfec5e652010-04-16 16:01:27 -07003162
Eric Dumazet567e4b72015-02-06 12:59:01 -08003163	/* First check the global flow table for a match */
3164 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3165 if ((ident ^ hash) & ~rps_cpu_mask)
3166 goto try_rps;
3167
3168 next_cpu = ident & rps_cpu_mask;
3169
3170 /* OK, now we know there is a match,
3171 * we can look at the local (per receive queue) flow table
3172 */
Tom Herbert61b905d2014-03-24 15:34:47 -07003173 rflow = &flow_table->flows[hash & flow_table->mask];
Tom Herbertfec5e652010-04-16 16:01:27 -07003174 tcpu = rflow->cpu;
3175
Tom Herbertfec5e652010-04-16 16:01:27 -07003176 /*
3177 * If the desired CPU (where last recvmsg was done) is
3178 * different from current CPU (one in the rx-queue flow
3179 * table entry), switch if one of the following holds:
Eric Dumazeta31196b2015-04-25 09:35:24 -07003180 * - Current CPU is unset (>= nr_cpu_ids).
Tom Herbertfec5e652010-04-16 16:01:27 -07003181 * - Current CPU is offline.
3182 * - The current CPU's queue tail has advanced beyond the
3183 * last packet that was enqueued using this table entry.
3184 * This guarantees that all previous packets for the flow
3185 * have been dequeued, thus preserving in order delivery.
3186 */
3187 if (unlikely(tcpu != next_cpu) &&
Eric Dumazeta31196b2015-04-25 09:35:24 -07003188 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
Tom Herbertfec5e652010-04-16 16:01:27 -07003189 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003190 rflow->last_qtail)) >= 0)) {
3191 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003192 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003193 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003194
Eric Dumazeta31196b2015-04-25 09:35:24 -07003195 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003196 *rflowp = rflow;
3197 cpu = tcpu;
3198 goto done;
3199 }
3200 }
3201
Eric Dumazet567e4b72015-02-06 12:59:01 -08003202try_rps:
3203
Tom Herbert0a9627f2010-03-16 08:03:29 +00003204 if (map) {
Daniel Borkmann8fc54f62014-08-23 20:58:54 +02003205 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003206 if (cpu_online(tcpu)) {
3207 cpu = tcpu;
3208 goto done;
3209 }
3210 }
3211
3212done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003213 return cpu;
3214}
3215
Ben Hutchingsc4454772011-01-19 11:03:53 +00003216#ifdef CONFIG_RFS_ACCEL
3217
3218/**
3219 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3220 * @dev: Device on which the filter was set
3221 * @rxq_index: RX queue index
3222 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3223 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3224 *
3225 * Drivers that implement ndo_rx_flow_steer() should periodically call
3226 * this function for each installed filter and remove the filters for
3227 * which it returns %true.
3228 */
3229bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3230 u32 flow_id, u16 filter_id)
3231{
3232 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3233 struct rps_dev_flow_table *flow_table;
3234 struct rps_dev_flow *rflow;
3235 bool expire = true;
Eric Dumazeta31196b2015-04-25 09:35:24 -07003236 unsigned int cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003237
3238 rcu_read_lock();
3239 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3240 if (flow_table && flow_id <= flow_table->mask) {
3241 rflow = &flow_table->flows[flow_id];
3242 cpu = ACCESS_ONCE(rflow->cpu);
Eric Dumazeta31196b2015-04-25 09:35:24 -07003243 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
Ben Hutchingsc4454772011-01-19 11:03:53 +00003244 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3245 rflow->last_qtail) <
3246 (int)(10 * flow_table->mask)))
3247 expire = false;
3248 }
3249 rcu_read_unlock();
3250 return expire;
3251}
3252EXPORT_SYMBOL(rps_may_expire_flow);
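
/* Illustrative sketch (hypothetical driver code, not from dev.c itself):
 * the shape of a driver's periodic ARFS filter-expiry scan.  struct
 * example_filter and the filters[] array are made up; only the
 * rps_may_expire_flow() call reflects the real API.
 */
struct example_filter {
	bool	in_use;
	u16	rxq_index;
	u32	flow_id;
	u16	filter_id;
};

static void example_expire_filters(struct net_device *dev,
				   struct example_filter *filters,
				   unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct example_filter *f = &filters[i];

		if (!f->in_use)
			continue;
		if (rps_may_expire_flow(dev, f->rxq_index,
					f->flow_id, f->filter_id)) {
			/* remove the hardware filter here (driver specific) */
			f->in_use = false;
		}
	}
}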
3253
3254#endif /* CONFIG_RFS_ACCEL */
3255
Tom Herbert0a9627f2010-03-16 08:03:29 +00003256/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003257static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003258{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003259 struct softnet_data *sd = data;
3260
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003261 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003262 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003263}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003264
Tom Herbertfec5e652010-04-16 16:01:27 -07003265#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003266
3267/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003268 * Check if this softnet_data structure belongs to another CPU
3269 * If yes, queue it to our IPI list and return 1
3270 * If no, return 0
3271 */
3272static int rps_ipi_queued(struct softnet_data *sd)
3273{
3274#ifdef CONFIG_RPS
Christoph Lameter903ceff2014-08-17 12:30:35 -05003275 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003276
3277 if (sd != mysd) {
3278 sd->rps_ipi_next = mysd->rps_ipi_list;
3279 mysd->rps_ipi_list = sd;
3280
3281 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3282 return 1;
3283 }
3284#endif /* CONFIG_RPS */
3285 return 0;
3286}
3287
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003288#ifdef CONFIG_NET_FLOW_LIMIT
3289int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3290#endif
3291
3292static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3293{
3294#ifdef CONFIG_NET_FLOW_LIMIT
3295 struct sd_flow_limit *fl;
3296 struct softnet_data *sd;
3297 unsigned int old_flow, new_flow;
3298
3299 if (qlen < (netdev_max_backlog >> 1))
3300 return false;
3301
Christoph Lameter903ceff2014-08-17 12:30:35 -05003302 sd = this_cpu_ptr(&softnet_data);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003303
3304 rcu_read_lock();
3305 fl = rcu_dereference(sd->flow_limit);
3306 if (fl) {
Tom Herbert3958afa1b2013-12-15 22:12:06 -08003307 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003308 old_flow = fl->history[fl->history_head];
3309 fl->history[fl->history_head] = new_flow;
3310
3311 fl->history_head++;
3312 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3313
3314 if (likely(fl->buckets[old_flow]))
3315 fl->buckets[old_flow]--;
3316
3317 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3318 fl->count++;
3319 rcu_read_unlock();
3320 return true;
3321 }
3322 }
3323 rcu_read_unlock();
3324#endif
3325 return false;
3326}
3327
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003328/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003329 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3330 * queue (may be a remote CPU queue).
3331 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003332static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3333 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003334{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003335 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003336 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003337 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003338
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003339 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003340
3341 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003342
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003343 rps_lock(sd);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003344 qlen = skb_queue_len(&sd->input_pkt_queue);
3345 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Li RongQinge008f3f2014-12-08 09:42:55 +08003346 if (qlen) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003347enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003348 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003349 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003350 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003351 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003352 return NET_RX_SUCCESS;
3353 }
3354
Eric Dumazetebda37c22010-05-06 23:51:21 +00003355 /* Schedule NAPI for backlog device
3356	 * We can use a non-atomic operation since we own the queue lock
3357 */
3358 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003359 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003360 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003361 }
3362 goto enqueue;
3363 }
3364
Changli Gaodee42872010-05-02 05:42:16 +00003365 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003366 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003367
Tom Herbert0a9627f2010-03-16 08:03:29 +00003368 local_irq_restore(flags);
3369
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003370 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003371 kfree_skb(skb);
3372 return NET_RX_DROP;
3373}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003375static int netif_rx_internal(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003377 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378
Eric Dumazet588f0332011-11-15 04:12:55 +00003379 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380
Koki Sanagicf66ba52010-08-23 18:45:02 +09003381 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003382#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003383 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003384 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003385 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386
Changli Gaocece1942010-08-07 20:35:43 -07003387 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003388 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003389
3390 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003391 if (cpu < 0)
3392 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003393
3394 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3395
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003396 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003397 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003398 } else
3399#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003400 {
3401 unsigned int qtail;
3402 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3403 put_cpu();
3404 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003405 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003407
3408/**
3409 * netif_rx - post buffer to the network code
3410 * @skb: buffer to post
3411 *
3412 * This function receives a packet from a device driver and queues it for
3413 * the upper (protocol) levels to process. It always succeeds. The buffer
3414 * may be dropped during processing for congestion control or by the
3415 * protocol layers.
3416 *
3417 * return values:
3418 * NET_RX_SUCCESS (no congestion)
3419 * NET_RX_DROP (packet was dropped)
3420 *
3421 */
3422
3423int netif_rx(struct sk_buff *skb)
3424{
3425 trace_netif_rx_entry(skb);
3426
3427 return netif_rx_internal(skb);
3428}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003429EXPORT_SYMBOL(netif_rx);
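
/* Illustrative sketch (hypothetical driver code, not from dev.c itself):
 * the classic pattern a non-NAPI driver follows before handing a
 * received frame to netif_rx(), typically from its interrupt handler.
 * example_rx_one() is a made-up helper name.
 */
static void example_rx_one(struct net_device *dev,
			   const void *data, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	netif_rx(skb);
}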
Linus Torvalds1da177e2005-04-16 15:20:36 -07003430
3431int netif_rx_ni(struct sk_buff *skb)
3432{
3433 int err;
3434
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003435 trace_netif_rx_ni_entry(skb);
3436
Linus Torvalds1da177e2005-04-16 15:20:36 -07003437 preempt_disable();
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003438 err = netif_rx_internal(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439 if (local_softirq_pending())
3440 do_softirq();
3441 preempt_enable();
3442
3443 return err;
3444}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445EXPORT_SYMBOL(netif_rx_ni);
3446
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447static void net_tx_action(struct softirq_action *h)
3448{
Christoph Lameter903ceff2014-08-17 12:30:35 -05003449 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450
3451 if (sd->completion_queue) {
3452 struct sk_buff *clist;
3453
3454 local_irq_disable();
3455 clist = sd->completion_queue;
3456 sd->completion_queue = NULL;
3457 local_irq_enable();
3458
3459 while (clist) {
3460 struct sk_buff *skb = clist;
3461 clist = clist->next;
3462
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003463 WARN_ON(atomic_read(&skb->users));
Eric Dumazete6247022013-12-05 04:45:08 -08003464 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3465 trace_consume_skb(skb);
3466 else
3467 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468 __kfree_skb(skb);
3469 }
3470 }
3471
3472 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003473 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474
3475 local_irq_disable();
3476 head = sd->output_queue;
3477 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003478 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479 local_irq_enable();
3480
3481 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003482 struct Qdisc *q = head;
3483 spinlock_t *root_lock;
3484
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485 head = head->next_sched;
3486
David S. Miller5fb66222008-08-02 20:02:43 -07003487 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003488 if (spin_trylock(root_lock)) {
Peter Zijlstra4e857c52014-03-17 18:06:10 +01003489 smp_mb__before_atomic();
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003490 clear_bit(__QDISC_STATE_SCHED,
3491 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003492 qdisc_run(q);
3493 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003494 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003495 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003496 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003497 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003498 } else {
Peter Zijlstra4e857c52014-03-17 18:06:10 +01003499 smp_mb__before_atomic();
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003500 clear_bit(__QDISC_STATE_SCHED,
3501 &q->state);
3502 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503 }
3504 }
3505 }
3506}
3507
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003508#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3509 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003510/* This hook is defined here for ATM LANE */
3511int (*br_fdb_test_addr_hook)(struct net_device *dev,
3512 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003513EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003514#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516#ifdef CONFIG_NET_CLS_ACT
3517/* TODO: Maybe we should just force sch_ingress to be compiled in
3518 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless instructions,
3519 * a compare and 2 extra stores, right now if we don't have it on
3520 * but do have CONFIG_NET_CLS_ACT.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003521 * NOTE: This doesn't stop any functionality; if you don't have
3522 * the ingress scheduler, you just can't add policies on ingress.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523 *
3524 */
Eric Dumazet24824a02010-10-02 06:11:55 +00003525static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003528 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07003529 int result = TC_ACT_OK;
3530 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003531
Stephen Hemmingerde384832010-08-01 00:33:23 -07003532 if (unlikely(MAX_RED_LOOP < ttl++)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003533 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3534 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07003535 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536 }
3537
Herbert Xuf697c3e2007-10-14 00:38:47 -07003538 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3539 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3540
John Fastabend46e5da42014-09-12 20:04:52 -07003541 q = rcu_dereference(rxq->qdisc);
David S. Miller8d50b532008-07-30 02:37:46 -07003542 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07003543 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07003544 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3545 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07003546 spin_unlock(qdisc_lock(q));
3547 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07003548
Linus Torvalds1da177e2005-04-16 15:20:36 -07003549 return result;
3550}
Herbert Xuf697c3e2007-10-14 00:38:47 -07003551
3552static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3553 struct packet_type **pt_prev,
3554 int *ret, struct net_device *orig_dev)
3555{
Eric Dumazet24824a02010-10-02 06:11:55 +00003556 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3557
John Fastabend46e5da42014-09-12 20:04:52 -07003558 if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
Daniel Borkmann45771392015-04-10 23:07:54 +02003559 return skb;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003560
3561 if (*pt_prev) {
3562 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3563 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003564 }
3565
Eric Dumazet24824a02010-10-02 06:11:55 +00003566 switch (ing_filter(skb, rxq)) {
Herbert Xuf697c3e2007-10-14 00:38:47 -07003567 case TC_ACT_SHOT:
3568 case TC_ACT_STOLEN:
3569 kfree_skb(skb);
3570 return NULL;
3571 }
3572
Herbert Xuf697c3e2007-10-14 00:38:47 -07003573 return skb;
3574}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575#endif
3576
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003577/**
3578 * netdev_rx_handler_register - register receive handler
3579 * @dev: device to register a handler for
3580 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003581 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003582 *
Masanari Iidae2278672014-02-18 22:54:36 +09003583 * Register a receive handler for a device. This handler will then be
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003584 * called from __netif_receive_skb. A negative errno code is returned
3585 * on a failure.
3586 *
3587 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003588 *
3589 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003590 */
3591int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003592 rx_handler_func_t *rx_handler,
3593 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003594{
3595 ASSERT_RTNL();
3596
3597 if (dev->rx_handler)
3598 return -EBUSY;
3599
Eric Dumazet00cfec32013-03-29 03:01:22 +00003600 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00003601 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003602 rcu_assign_pointer(dev->rx_handler, rx_handler);
3603
3604 return 0;
3605}
3606EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
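
/* Illustrative sketch (hypothetical caller code, not from dev.c itself):
 * a minimal rx_handler and its registration, in the style of the bridge
 * and bonding drivers.  The example_* names are made up; the handler
 * signature and the rtnl locking requirement follow the real API.
 */
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	/* A real handler may consume *pskb (RX_HANDLER_CONSUMED),
	 * re-inject it on another device (RX_HANDLER_ANOTHER), request
	 * exact-match delivery (RX_HANDLER_EXACT) or, as here, let
	 * normal protocol delivery continue.
	 */
	return RX_HANDLER_PASS;
}

static int example_attach(struct net_device *dev, void *priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_handle_frame, priv);
	rtnl_unlock();

	return err;
}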
3607
3608/**
3609 * netdev_rx_handler_unregister - unregister receive handler
3610 * @dev: device to unregister a handler from
3611 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00003612 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003613 *
3614 * The caller must hold the rtnl_mutex.
3615 */
3616void netdev_rx_handler_unregister(struct net_device *dev)
3617{
3618
3619 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003620 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00003621	/* a reader seeing a non-NULL rx_handler in a rcu_read_lock()
3622	 * section is guaranteed to see a non-NULL rx_handler_data
3623 * as well.
3624 */
3625 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003626 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003627}
3628EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3629
Mel Gormanb4b9e352012-07-31 16:44:26 -07003630/*
3631 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3632 * the special handling of PFMEMALLOC skbs.
3633 */
3634static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3635{
3636 switch (skb->protocol) {
Joe Perches2b8837a2014-03-12 10:04:17 -07003637 case htons(ETH_P_ARP):
3638 case htons(ETH_P_IP):
3639 case htons(ETH_P_IPV6):
3640 case htons(ETH_P_8021Q):
3641 case htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07003642 return true;
3643 default:
3644 return false;
3645 }
3646}
3647
David S. Miller9754e292013-02-14 15:57:38 -05003648static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003649{
3650 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003651 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003652 struct net_device *orig_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003653 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08003655 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003656
Eric Dumazet588f0332011-11-15 04:12:55 +00003657 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003658
Koki Sanagicf66ba52010-08-23 18:45:02 +09003659 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003660
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003661 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003662
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003663 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003664 if (!skb_transport_header_was_set(skb))
3665 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003666 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003667
3668 pt_prev = NULL;
3669
3670 rcu_read_lock();
3671
David S. Miller63d8ea72011-02-28 10:48:59 -08003672another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003673 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003674
3675 __this_cpu_inc(softnet_data.processed);
3676
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003677 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3678 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04003679 skb = skb_vlan_untag(skb);
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003680 if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003681 goto unlock;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003682 }
3683
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684#ifdef CONFIG_NET_CLS_ACT
3685 if (skb->tc_verd & TC_NCLS) {
3686 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3687 goto ncls;
3688 }
3689#endif
3690
David S. Miller9754e292013-02-14 15:57:38 -05003691 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003692 goto skip_taps;
3693
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Salam Noureddine7866a622015-01-27 11:35:48 -08003695 if (pt_prev)
3696 ret = deliver_skb(skb, pt_prev, orig_dev);
3697 pt_prev = ptype;
3698 }
3699
3700 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
3701 if (pt_prev)
3702 ret = deliver_skb(skb, pt_prev, orig_dev);
3703 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704 }
3705
Mel Gormanb4b9e352012-07-31 16:44:26 -07003706skip_taps:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707#ifdef CONFIG_NET_CLS_ACT
Daniel Borkmann45771392015-04-10 23:07:54 +02003708 if (static_key_false(&ingress_needed)) {
3709 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3710 if (!skb)
3711 goto unlock;
3712 }
3713
3714 skb->tc_verd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715ncls:
3716#endif
David S. Miller9754e292013-02-14 15:57:38 -05003717 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003718 goto drop;
3719
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01003720 if (skb_vlan_tag_present(skb)) {
John Fastabend24257172011-10-10 09:16:41 +00003721 if (pt_prev) {
3722 ret = deliver_skb(skb, pt_prev, orig_dev);
3723 pt_prev = NULL;
3724 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003725 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003726 goto another_round;
3727 else if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003728 goto unlock;
John Fastabend24257172011-10-10 09:16:41 +00003729 }
3730
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003731 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003732 if (rx_handler) {
3733 if (pt_prev) {
3734 ret = deliver_skb(skb, pt_prev, orig_dev);
3735 pt_prev = NULL;
3736 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003737 switch (rx_handler(&skb)) {
3738 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00003739 ret = NET_RX_SUCCESS;
Mel Gormanb4b9e352012-07-31 16:44:26 -07003740 goto unlock;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003741 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003742 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003743 case RX_HANDLER_EXACT:
3744 deliver_exact = true;
3745 case RX_HANDLER_PASS:
3746 break;
3747 default:
3748 BUG();
3749 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003750 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01003752 if (unlikely(skb_vlan_tag_present(skb))) {
3753 if (skb_vlan_tag_get_id(skb))
Eric Dumazetd4b812d2013-07-18 07:19:26 -07003754 skb->pkt_type = PACKET_OTHERHOST;
3755 /* Note: we might in the future use prio bits
3756 * and set skb->priority like in vlan_do_receive()
3757 * For the time being, just ignore Priority Code Point
3758 */
3759 skb->vlan_tci = 0;
3760 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003761
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762 type = skb->protocol;
Salam Noureddine7866a622015-01-27 11:35:48 -08003763
3764 /* deliver only exact match when indicated */
3765 if (likely(!deliver_exact)) {
3766 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3767 &ptype_base[ntohs(type) &
3768 PTYPE_HASH_MASK]);
3769 }
3770
3771 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3772 &orig_dev->ptype_specific);
3773
3774 if (unlikely(skb->dev != orig_dev)) {
3775 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3776 &skb->dev->ptype_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003777 }
3778
3779 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003780 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003781 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003782 else
3783 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003785drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003786 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787 kfree_skb(skb);
3788 /* Jamal, now you will not able to escape explaining
3789 * me how you were going to use this. :-)
3790 */
3791 ret = NET_RX_DROP;
3792 }
3793
Mel Gormanb4b9e352012-07-31 16:44:26 -07003794unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795 rcu_read_unlock();
David S. Miller9754e292013-02-14 15:57:38 -05003796 return ret;
3797}
3798
3799static int __netif_receive_skb(struct sk_buff *skb)
3800{
3801 int ret;
3802
3803 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3804 unsigned long pflags = current->flags;
3805
3806 /*
3807 * PFMEMALLOC skbs are special, they should
3808 * - be delivered to SOCK_MEMALLOC sockets only
3809 * - stay away from userspace
3810 * - have bounded memory usage
3811 *
3812 * Use PF_MEMALLOC as this saves us from propagating the allocation
3813 * context down to all allocation sites.
3814 */
3815 current->flags |= PF_MEMALLOC;
3816 ret = __netif_receive_skb_core(skb, true);
3817 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3818 } else
3819 ret = __netif_receive_skb_core(skb, false);
3820
Linus Torvalds1da177e2005-04-16 15:20:36 -07003821 return ret;
3822}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003823
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003824static int netif_receive_skb_internal(struct sk_buff *skb)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003825{
Eric Dumazet588f0332011-11-15 04:12:55 +00003826 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003827
Richard Cochranc1f19b52010-07-17 08:49:36 +00003828 if (skb_defer_rx_timestamp(skb))
3829 return NET_RX_SUCCESS;
3830
Eric Dumazetdf334542010-03-24 19:13:54 +00003831#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003832 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07003833 struct rps_dev_flow voidflow, *rflow = &voidflow;
3834 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003835
Eric Dumazet3b098e22010-05-15 23:57:10 -07003836 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003837
Eric Dumazet3b098e22010-05-15 23:57:10 -07003838 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07003839
Eric Dumazet3b098e22010-05-15 23:57:10 -07003840 if (cpu >= 0) {
3841 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3842 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00003843 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003844 }
Eric Dumazetadc93002011-11-17 03:13:26 +00003845 rcu_read_unlock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003846 }
Tom Herbert1e94d722010-03-18 17:45:44 -07003847#endif
Eric Dumazetadc93002011-11-17 03:13:26 +00003848 return __netif_receive_skb(skb);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003849}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003850
3851/**
3852 * netif_receive_skb - process receive buffer from network
3853 * @skb: buffer to process
3854 *
3855 * netif_receive_skb() is the main receive data processing function.
3856 * It always succeeds. The buffer may be dropped during processing
3857 * for congestion control or by the protocol layers.
3858 *
3859 * This function may only be called from softirq context and interrupts
3860 * should be enabled.
3861 *
3862 * Return values (usually ignored):
3863 * NET_RX_SUCCESS: no congestion
3864 * NET_RX_DROP: packet was dropped
3865 */
David Miller7026b1d2015-04-05 22:19:04 -04003866int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003867{
3868 trace_netif_receive_skb_entry(skb);
3869
3870 return netif_receive_skb_internal(skb);
3871}
David Miller7026b1d2015-04-05 22:19:04 -04003872EXPORT_SYMBOL(netif_receive_skb_sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873
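/*
 * Usage sketch (illustrative only, not part of dev.c): a driver RX path is
 * normally expected to reach the receive entry point above through the
 * netif_receive_skb() wrapper. example_rx_one_frame() is a hypothetical
 * helper; only eth_type_trans() and netif_receive_skb_sk() are existing
 * kernel functions here.
 */
static int example_rx_one_frame(struct net_device *dev, struct sk_buff *skb)
{
	/* eth_type_trans() sets skb->dev and skb->pkt_type and returns the
	 * protocol, which the driver records before handing the frame up.
	 */
	skb->protocol = eth_type_trans(skb, dev);

	/* Must run in softirq context with interrupts enabled (see above). */
	return netif_receive_skb_sk(skb->sk, skb);
}
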
Eric Dumazet88751272010-04-19 05:07:33 +00003874/* Network device is going away; flush any packets still pending.
3875 * Called with irqs disabled.
3876 */
Changli Gao152102c2010-03-30 20:16:22 +00003877static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003878{
Changli Gao152102c2010-03-30 20:16:22 +00003879 struct net_device *dev = arg;
Christoph Lameter903ceff2014-08-17 12:30:35 -05003880 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003881 struct sk_buff *skb, *tmp;
3882
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003883 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003884 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003885 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003886 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003887 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003888 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003889 }
Changli Gao6e7676c2010-04-27 15:07:33 -07003890 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003891 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003892
3893 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3894 if (skb->dev == dev) {
3895 __skb_unlink(skb, &sd->process_queue);
3896 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003897 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003898 }
3899 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003900}
3901
Herbert Xud565b0a2008-12-15 23:38:52 -08003902static int napi_gro_complete(struct sk_buff *skb)
3903{
Vlad Yasevich22061d82012-11-15 08:49:11 +00003904 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003905 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003906 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08003907 int err = -ENOENT;
3908
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003909 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3910
Herbert Xufc59f9a2009-04-14 15:11:06 -07003911 if (NAPI_GRO_CB(skb)->count == 1) {
3912 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003913 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07003914 }
Herbert Xud565b0a2008-12-15 23:38:52 -08003915
3916 rcu_read_lock();
3917 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003918 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08003919 continue;
3920
Jerry Chu299603e82013-12-11 20:53:45 -08003921 err = ptype->callbacks.gro_complete(skb, 0);
Herbert Xud565b0a2008-12-15 23:38:52 -08003922 break;
3923 }
3924 rcu_read_unlock();
3925
3926 if (err) {
3927 WARN_ON(&ptype->list == head);
3928 kfree_skb(skb);
3929 return NET_RX_SUCCESS;
3930 }
3931
3932out:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003933 return netif_receive_skb_internal(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003934}
3935
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003936/* napi->gro_list contains packets ordered by age.
3937 * The youngest packets are at the head of it.
3938 * Complete skbs in reverse order to reduce latencies.
3939 */
3940void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08003941{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003942 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003943
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003944 /* scan list and build reverse chain */
3945 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3946 skb->prev = prev;
3947 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08003948 }
3949
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003950 for (skb = prev; skb; skb = prev) {
3951 skb->next = NULL;
3952
3953 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3954 return;
3955
3956 prev = skb->prev;
3957 napi_gro_complete(skb);
3958 napi->gro_count--;
3959 }
3960
Herbert Xud565b0a2008-12-15 23:38:52 -08003961 napi->gro_list = NULL;
3962}
Eric Dumazet86cac582010-08-31 18:25:32 +00003963EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08003964
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003965static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3966{
3967 struct sk_buff *p;
3968 unsigned int maclen = skb->dev->hard_header_len;
Tom Herbert0b4cec82014-01-15 08:58:06 -08003969 u32 hash = skb_get_hash_raw(skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003970
3971 for (p = napi->gro_list; p; p = p->next) {
3972 unsigned long diffs;
3973
Tom Herbert0b4cec82014-01-15 08:58:06 -08003974 NAPI_GRO_CB(p)->flush = 0;
3975
3976 if (hash != skb_get_hash_raw(p)) {
3977 NAPI_GRO_CB(p)->same_flow = 0;
3978 continue;
3979 }
3980
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003981 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3982 diffs |= p->vlan_tci ^ skb->vlan_tci;
3983 if (maclen == ETH_HLEN)
3984 diffs |= compare_ether_header(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07003985 skb_mac_header(skb));
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003986 else if (!diffs)
3987 diffs = memcmp(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07003988 skb_mac_header(skb),
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003989 maclen);
3990 NAPI_GRO_CB(p)->same_flow = !diffs;
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003991 }
3992}
3993
Jerry Chu299603e82013-12-11 20:53:45 -08003994static void skb_gro_reset_offset(struct sk_buff *skb)
3995{
3996 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3997 const skb_frag_t *frag0 = &pinfo->frags[0];
3998
3999 NAPI_GRO_CB(skb)->data_offset = 0;
4000 NAPI_GRO_CB(skb)->frag0 = NULL;
4001 NAPI_GRO_CB(skb)->frag0_len = 0;
4002
4003 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4004 pinfo->nr_frags &&
4005 !PageHighMem(skb_frag_page(frag0))) {
4006 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4007 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xud565b0a2008-12-15 23:38:52 -08004008 }
4009}
4010
Eric Dumazeta50e2332014-03-29 21:28:21 -07004011static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4012{
4013 struct skb_shared_info *pinfo = skb_shinfo(skb);
4014
4015 BUG_ON(skb->end - skb->tail < grow);
4016
4017 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4018
4019 skb->data_len -= grow;
4020 skb->tail += grow;
4021
4022 pinfo->frags[0].page_offset += grow;
4023 skb_frag_size_sub(&pinfo->frags[0], grow);
4024
4025 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4026 skb_frag_unref(skb, 0);
4027 memmove(pinfo->frags, pinfo->frags + 1,
4028 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4029 }
4030}
4031
Rami Rosenbb728822012-11-28 21:55:25 +00004032static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08004033{
4034 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004035 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08004036 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00004037 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004038 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004039 enum gro_result ret;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004040 int grow;
Herbert Xud565b0a2008-12-15 23:38:52 -08004041
Eric W. Biederman9c62a682014-03-14 20:51:52 -07004042 if (!(skb->dev->features & NETIF_F_GRO))
Herbert Xud565b0a2008-12-15 23:38:52 -08004043 goto normal;
4044
Tom Herbert5a212322014-08-31 15:12:41 -07004045 if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
Herbert Xuf17f5c92009-01-14 14:36:12 -08004046 goto normal;
4047
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004048 gro_list_prepare(napi, skb);
4049
Herbert Xud565b0a2008-12-15 23:38:52 -08004050 rcu_read_lock();
4051 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004052 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08004053 continue;
4054
Herbert Xu86911732009-01-29 14:19:50 +00004055 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00004056 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004057 NAPI_GRO_CB(skb)->same_flow = 0;
4058 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08004059 NAPI_GRO_CB(skb)->free = 0;
Or Gerlitzb582ef02014-01-20 13:59:19 +02004060 NAPI_GRO_CB(skb)->udp_mark = 0;
Tom Herbert15e23962015-02-10 16:30:31 -08004061 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004062
Tom Herbert662880f2014-08-27 21:26:56 -07004063 /* Setup for GRO checksum validation */
4064 switch (skb->ip_summed) {
4065 case CHECKSUM_COMPLETE:
4066 NAPI_GRO_CB(skb)->csum = skb->csum;
4067 NAPI_GRO_CB(skb)->csum_valid = 1;
4068 NAPI_GRO_CB(skb)->csum_cnt = 0;
4069 break;
4070 case CHECKSUM_UNNECESSARY:
4071 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4072 NAPI_GRO_CB(skb)->csum_valid = 0;
4073 break;
4074 default:
4075 NAPI_GRO_CB(skb)->csum_cnt = 0;
4076 NAPI_GRO_CB(skb)->csum_valid = 0;
4077 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004078
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00004079 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004080 break;
4081 }
4082 rcu_read_unlock();
4083
4084 if (&ptype->list == head)
4085 goto normal;
4086
Herbert Xu0da2afd52008-12-26 14:57:42 -08004087 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004088 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08004089
Herbert Xud565b0a2008-12-15 23:38:52 -08004090 if (pp) {
4091 struct sk_buff *nskb = *pp;
4092
4093 *pp = nskb->next;
4094 nskb->next = NULL;
4095 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00004096 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08004097 }
4098
Herbert Xu0da2afd52008-12-26 14:57:42 -08004099 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08004100 goto ok;
4101
Eric Dumazet600adc12014-01-09 14:12:19 -08004102 if (NAPI_GRO_CB(skb)->flush)
Herbert Xud565b0a2008-12-15 23:38:52 -08004103 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08004104
Eric Dumazet600adc12014-01-09 14:12:19 -08004105 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4106 struct sk_buff *nskb = napi->gro_list;
4107
4108 /* locate the end of the list to select the 'oldest' flow */
4109 while (nskb->next) {
4110 pp = &nskb->next;
4111 nskb = *pp;
4112 }
4113 *pp = NULL;
4114 nskb->next = NULL;
4115 napi_gro_complete(nskb);
4116 } else {
4117 napi->gro_count++;
4118 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004119 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004120 NAPI_GRO_CB(skb)->age = jiffies;
Eric Dumazet29e98242014-05-16 11:34:37 -07004121 NAPI_GRO_CB(skb)->last = skb;
Herbert Xu86911732009-01-29 14:19:50 +00004122 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004123 skb->next = napi->gro_list;
4124 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004125 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08004126
Herbert Xuad0f9902009-02-01 01:24:55 -08004127pull:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004128 grow = skb_gro_offset(skb) - skb_headlen(skb);
4129 if (grow > 0)
4130 gro_pull_from_frag0(skb, grow);
Herbert Xud565b0a2008-12-15 23:38:52 -08004131ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004132 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08004133
4134normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08004135 ret = GRO_NORMAL;
4136 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08004137}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004138
Jerry Chubf5a7552014-01-07 10:23:19 -08004139struct packet_offload *gro_find_receive_by_type(__be16 type)
4140{
4141 struct list_head *offload_head = &offload_base;
4142 struct packet_offload *ptype;
4143
4144 list_for_each_entry_rcu(ptype, offload_head, list) {
4145 if (ptype->type != type || !ptype->callbacks.gro_receive)
4146 continue;
4147 return ptype;
4148 }
4149 return NULL;
4150}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004151EXPORT_SYMBOL(gro_find_receive_by_type);
Jerry Chubf5a7552014-01-07 10:23:19 -08004152
4153struct packet_offload *gro_find_complete_by_type(__be16 type)
4154{
4155 struct list_head *offload_head = &offload_base;
4156 struct packet_offload *ptype;
4157
4158 list_for_each_entry_rcu(ptype, offload_head, list) {
4159 if (ptype->type != type || !ptype->callbacks.gro_complete)
4160 continue;
4161 return ptype;
4162 }
4163 return NULL;
4164}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004165EXPORT_SYMBOL(gro_find_complete_by_type);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004166
Rami Rosenbb728822012-11-28 21:55:25 +00004167static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08004168{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004169 switch (ret) {
4170 case GRO_NORMAL:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004171 if (netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004172 ret = GRO_DROP;
4173 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004174
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004175 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08004176 kfree_skb(skb);
4177 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004178
Eric Dumazetdaa86542012-04-19 07:07:40 +00004179 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00004180 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4181 kmem_cache_free(skbuff_head_cache, skb);
4182 else
4183 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00004184 break;
4185
Ben Hutchings5b252f02009-10-29 07:17:09 +00004186 case GRO_HELD:
4187 case GRO_MERGED:
4188 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004189 }
4190
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004191 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004192}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004193
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004194gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004195{
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004196 trace_napi_gro_receive_entry(skb);
Herbert Xu86911732009-01-29 14:19:50 +00004197
Eric Dumazeta50e2332014-03-29 21:28:21 -07004198 skb_gro_reset_offset(skb);
4199
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004200 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004201}
4202EXPORT_SYMBOL(napi_gro_receive);
4203
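/*
 * Illustrative sketch (hypothetical driver code, not part of dev.c): a NAPI
 * poll routine usually feeds completed RX buffers through napi_gro_receive()
 * above rather than the plain receive path, so that flows can be merged.
 * example_fetch_rx_skb() is a made-up stand-in for the driver's descriptor
 * ring handling.
 */
static int example_gro_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int work = 0;

	while (work < budget &&
	       (skb = example_fetch_rx_skb(napi->dev)) != NULL) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);
		work++;
	}
	return work;
}
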
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00004204static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004205{
Eric Dumazet93a35f52014-10-23 06:30:30 -07004206 if (unlikely(skb->pfmemalloc)) {
4207 consume_skb(skb);
4208 return;
4209 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004210 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00004211 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4212 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00004213 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08004214 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08004215 skb->skb_iif = 0;
Jerry Chuc3caf112014-07-14 15:54:46 -07004216 skb->encapsulation = 0;
4217 skb_shinfo(skb)->gso_type = 0;
Eric Dumazete33d0ba2014-04-03 09:28:10 -07004218 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08004219
4220 napi->skb = skb;
4221}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004222
Herbert Xu76620aa2009-04-16 02:02:07 -07004223struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08004224{
Herbert Xu5d38a072009-01-04 16:13:40 -08004225 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004226
4227 if (!skb) {
Alexander Duyckfd11a832014-12-09 19:40:49 -08004228 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
Eric Dumazet84b9cd62013-12-05 21:44:27 -08004229 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004230 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004231 return skb;
4232}
Herbert Xu76620aa2009-04-16 02:02:07 -07004233EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004234
Eric Dumazeta50e2332014-03-29 21:28:21 -07004235static gro_result_t napi_frags_finish(struct napi_struct *napi,
4236 struct sk_buff *skb,
4237 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004238{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004239 switch (ret) {
4240 case GRO_NORMAL:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004241 case GRO_HELD:
4242 __skb_push(skb, ETH_HLEN);
4243 skb->protocol = eth_type_trans(skb, skb->dev);
4244 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004245 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00004246 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004247
4248 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004249 case GRO_MERGED_FREE:
4250 napi_reuse_skb(napi, skb);
4251 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004252
4253 case GRO_MERGED:
4254 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004255 }
4256
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004257 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004258}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004259
Eric Dumazeta50e2332014-03-29 21:28:21 -07004260/* Upper GRO stack assumes network header starts at gro_offset=0
4261 * Drivers could call both napi_gro_frags() and napi_gro_receive()
4262 * We copy ethernet header into skb->data to have a common layout.
4263 */
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004264static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004265{
Herbert Xu76620aa2009-04-16 02:02:07 -07004266 struct sk_buff *skb = napi->skb;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004267 const struct ethhdr *eth;
4268 unsigned int hlen = sizeof(*eth);
Herbert Xu76620aa2009-04-16 02:02:07 -07004269
4270 napi->skb = NULL;
4271
Eric Dumazeta50e2332014-03-29 21:28:21 -07004272 skb_reset_mac_header(skb);
4273 skb_gro_reset_offset(skb);
4274
4275 eth = skb_gro_header_fast(skb, 0);
4276 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4277 eth = skb_gro_header_slow(skb, hlen, 0);
4278 if (unlikely(!eth)) {
4279 napi_reuse_skb(napi, skb);
4280 return NULL;
4281 }
4282 } else {
4283 gro_pull_from_frag0(skb, hlen);
4284 NAPI_GRO_CB(skb)->frag0 += hlen;
4285 NAPI_GRO_CB(skb)->frag0_len -= hlen;
Herbert Xu76620aa2009-04-16 02:02:07 -07004286 }
Eric Dumazeta50e2332014-03-29 21:28:21 -07004287 __skb_pull(skb, hlen);
4288
4289 /*
4290 * This works because the only protocols we care about don't require
4291 * special handling.
4292 * We'll fix it up properly in napi_frags_finish()
4293 */
4294 skb->protocol = eth->h_proto;
Herbert Xu76620aa2009-04-16 02:02:07 -07004295
Herbert Xu76620aa2009-04-16 02:02:07 -07004296 return skb;
4297}
Herbert Xu76620aa2009-04-16 02:02:07 -07004298
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004299gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004300{
4301 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004302
4303 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004304 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004305
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004306 trace_napi_gro_frags_entry(skb);
4307
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004308 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004309}
4310EXPORT_SYMBOL(napi_gro_frags);
4311
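/*
 * Illustrative sketch (hypothetical, not part of dev.c): the frag-based GRO
 * interface. A driver borrows the per-NAPI skb with napi_get_frags(),
 * attaches its RX page as a fragment and hands it back via napi_gro_frags(),
 * which pulls the Ethernet header in napi_frags_skb() above. The page,
 * offset, len and truesize arguments stand in for real descriptor state.
 */
static void example_rx_page(struct napi_struct *napi, struct page *page,
			    unsigned int offset, unsigned int len,
			    unsigned int truesize)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return;	/* allocation failed; a real driver would recycle the page */

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
			len, truesize);
	napi_gro_frags(napi);
}
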
Tom Herbert573e8fc2014-08-22 13:33:47 -07004312/* Compute the checksum from gro_offset and return the folded value
4313 * after adding in any pseudo checksum.
4314 */
4315__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4316{
4317 __wsum wsum;
4318 __sum16 sum;
4319
4320 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4321
4322 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4323 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4324 if (likely(!sum)) {
4325 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4326 !skb->csum_complete_sw)
4327 netdev_rx_csum_fault(skb->dev);
4328 }
4329
4330 NAPI_GRO_CB(skb)->csum = wsum;
4331 NAPI_GRO_CB(skb)->csum_valid = 1;
4332
4333 return sum;
4334}
4335EXPORT_SYMBOL(__skb_gro_checksum_complete);
4336
Eric Dumazete326bed2010-04-22 00:22:45 -07004337/*
Zhi Yong Wu855abcf2014-01-01 04:34:50 +08004338 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
Eric Dumazete326bed2010-04-22 00:22:45 -07004339 * Note: called with local irq disabled, but exits with local irq enabled.
4340 */
4341static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4342{
4343#ifdef CONFIG_RPS
4344 struct softnet_data *remsd = sd->rps_ipi_list;
4345
4346 if (remsd) {
4347 sd->rps_ipi_list = NULL;
4348
4349 local_irq_enable();
4350
4351		/* Send pending IPIs to kick RPS processing on remote CPUs. */
4352 while (remsd) {
4353 struct softnet_data *next = remsd->rps_ipi_next;
4354
4355 if (cpu_online(remsd->cpu))
Frederic Weisbeckerc46fff22014-02-24 16:40:02 +01004356 smp_call_function_single_async(remsd->cpu,
Frederic Weisbeckerfce8ad12014-02-24 16:40:01 +01004357 &remsd->csd);
Eric Dumazete326bed2010-04-22 00:22:45 -07004358 remsd = next;
4359 }
4360 } else
4361#endif
4362 local_irq_enable();
4363}
4364
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004365static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4366{
4367#ifdef CONFIG_RPS
4368 return sd->rps_ipi_list != NULL;
4369#else
4370 return false;
4371#endif
4372}
4373
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004374static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004375{
4376 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004377 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378
Eric Dumazete326bed2010-04-22 00:22:45 -07004379	/* Check if we have pending IPIs; it's better to send them now
4380	 * than to wait until net_rx_action() ends.
4381 */
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004382 if (sd_has_rps_ipi_waiting(sd)) {
Eric Dumazete326bed2010-04-22 00:22:45 -07004383 local_irq_disable();
4384 net_rps_action_and_irq_enable(sd);
4385 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004386
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004387 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004388 local_irq_disable();
Tom Herbert11ef7a82014-06-30 09:50:40 -07004389 while (1) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004390 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004391
Changli Gao6e7676c2010-04-27 15:07:33 -07004392 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004393 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004394 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004395 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004396 input_queue_head_incr(sd);
4397 if (++work >= quota) {
4398 local_irq_enable();
4399 return work;
4400 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004401 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004402
Changli Gao6e7676c2010-04-27 15:07:33 -07004403 rps_lock(sd);
Tom Herbert11ef7a82014-06-30 09:50:40 -07004404 if (skb_queue_empty(&sd->input_pkt_queue)) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004405 /*
4406 * Inline a custom version of __napi_complete().
4407			 * Only the current CPU owns and manipulates this napi,
Tom Herbert11ef7a82014-06-30 09:50:40 -07004408 * and NAPI_STATE_SCHED is the only possible flag set
4409 * on backlog.
4410 * We can use a plain write instead of clear_bit(),
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004411			 * and we don't need an smp_mb() memory barrier.
4412 */
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004413 napi->state = 0;
Tom Herbert11ef7a82014-06-30 09:50:40 -07004414 rps_unlock(sd);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004415
Tom Herbert11ef7a82014-06-30 09:50:40 -07004416 break;
Changli Gao6e7676c2010-04-27 15:07:33 -07004417 }
Tom Herbert11ef7a82014-06-30 09:50:40 -07004418
4419 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4420 &sd->process_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07004421 rps_unlock(sd);
4422 }
4423 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004424
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004425 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426}
4427
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004428/**
4429 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004430 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004431 *
Eric Dumazetbc9ad162014-10-28 18:05:13 -07004432 * The entry's receive function will be scheduled to run.
4433 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004434 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004435void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004436{
4437 unsigned long flags;
4438
4439 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05004440 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004441 local_irq_restore(flags);
4442}
4443EXPORT_SYMBOL(__napi_schedule);
4444
Eric Dumazetbc9ad162014-10-28 18:05:13 -07004445/**
4446 * __napi_schedule_irqoff - schedule for receive
4447 * @n: entry to schedule
4448 *
4449 * Variant of __napi_schedule() assuming hard irqs are masked
4450 */
4451void __napi_schedule_irqoff(struct napi_struct *n)
4452{
4453 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4454}
4455EXPORT_SYMBOL(__napi_schedule_irqoff);
4456
Herbert Xud565b0a2008-12-15 23:38:52 -08004457void __napi_complete(struct napi_struct *n)
4458{
4459 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
Herbert Xud565b0a2008-12-15 23:38:52 -08004460
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004461 list_del_init(&n->poll_list);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01004462 smp_mb__before_atomic();
Herbert Xud565b0a2008-12-15 23:38:52 -08004463 clear_bit(NAPI_STATE_SCHED, &n->state);
4464}
4465EXPORT_SYMBOL(__napi_complete);
4466
Eric Dumazet3b47d302014-11-06 21:09:44 -08004467void napi_complete_done(struct napi_struct *n, int work_done)
Herbert Xud565b0a2008-12-15 23:38:52 -08004468{
4469 unsigned long flags;
4470
4471 /*
4472 * don't let napi dequeue from the cpu poll list
4473 * just in case its running on a different cpu
4474 */
4475 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4476 return;
4477
Eric Dumazet3b47d302014-11-06 21:09:44 -08004478 if (n->gro_list) {
4479 unsigned long timeout = 0;
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004480
Eric Dumazet3b47d302014-11-06 21:09:44 -08004481 if (work_done)
4482 timeout = n->dev->gro_flush_timeout;
4483
4484 if (timeout)
4485 hrtimer_start(&n->timer, ns_to_ktime(timeout),
4486 HRTIMER_MODE_REL_PINNED);
4487 else
4488 napi_gro_flush(n, false);
4489 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004490 if (likely(list_empty(&n->poll_list))) {
4491 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4492 } else {
4493 /* If n->poll_list is not empty, we need to mask irqs */
4494 local_irq_save(flags);
4495 __napi_complete(n);
4496 local_irq_restore(flags);
4497 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004498}
Eric Dumazet3b47d302014-11-06 21:09:44 -08004499EXPORT_SYMBOL(napi_complete_done);
Herbert Xud565b0a2008-12-15 23:38:52 -08004500
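/*
 * Illustrative sketch (hypothetical poll routine, not part of dev.c): when a
 * poll cycle uses less than its budget, the driver completes NAPI and reports
 * the amount of work done, which lets the gro_flush_timeout handling above
 * decide whether to flush the GRO list now or arm the watchdog timer.
 * example_clean_rx() and example_enable_irq() are made-up driver helpers.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work = example_clean_rx(napi, budget);

	if (work < budget) {
		napi_complete_done(napi, work);
		example_enable_irq(napi);
	}
	return work;
}
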
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004501/* must be called under rcu_read_lock(), as we dont take a reference */
4502struct napi_struct *napi_by_id(unsigned int napi_id)
4503{
4504 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4505 struct napi_struct *napi;
4506
4507 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4508 if (napi->napi_id == napi_id)
4509 return napi;
4510
4511 return NULL;
4512}
4513EXPORT_SYMBOL_GPL(napi_by_id);
4514
4515void napi_hash_add(struct napi_struct *napi)
4516{
4517 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4518
4519 spin_lock(&napi_hash_lock);
4520
4521		/* 0 is not a valid id; we also skip an id that is already taken.
4522		 * We expect both events to be extremely rare.
4523 */
4524 napi->napi_id = 0;
4525 while (!napi->napi_id) {
4526 napi->napi_id = ++napi_gen_id;
4527 if (napi_by_id(napi->napi_id))
4528 napi->napi_id = 0;
4529 }
4530
4531 hlist_add_head_rcu(&napi->napi_hash_node,
4532 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4533
4534 spin_unlock(&napi_hash_lock);
4535 }
4536}
4537EXPORT_SYMBOL_GPL(napi_hash_add);
4538
4539/* Warning: the caller is responsible for making sure an RCU grace period
4540 * is respected before freeing memory containing @napi
4541 */
4542void napi_hash_del(struct napi_struct *napi)
4543{
4544 spin_lock(&napi_hash_lock);
4545
4546 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4547 hlist_del_rcu(&napi->napi_hash_node);
4548
4549 spin_unlock(&napi_hash_lock);
4550}
4551EXPORT_SYMBOL_GPL(napi_hash_del);
4552
Eric Dumazet3b47d302014-11-06 21:09:44 -08004553static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4554{
4555 struct napi_struct *napi;
4556
4557 napi = container_of(timer, struct napi_struct, timer);
4558 if (napi->gro_list)
4559 napi_schedule(napi);
4560
4561 return HRTIMER_NORESTART;
4562}
4563
Herbert Xud565b0a2008-12-15 23:38:52 -08004564void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4565 int (*poll)(struct napi_struct *, int), int weight)
4566{
4567 INIT_LIST_HEAD(&napi->poll_list);
Eric Dumazet3b47d302014-11-06 21:09:44 -08004568 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
4569 napi->timer.function = napi_watchdog;
Herbert Xu4ae55442009-02-08 18:00:36 +00004570 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004571 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004572 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004573 napi->poll = poll;
Eric Dumazet82dc3c62013-03-05 15:57:22 +00004574 if (weight > NAPI_POLL_WEIGHT)
4575 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4576 weight, dev->name);
Herbert Xud565b0a2008-12-15 23:38:52 -08004577 napi->weight = weight;
4578 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004579 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004580#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004581 spin_lock_init(&napi->poll_lock);
4582 napi->poll_owner = -1;
4583#endif
4584 set_bit(NAPI_STATE_SCHED, &napi->state);
4585}
4586EXPORT_SYMBOL(netif_napi_add);
4587
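/*
 * Illustrative sketch (hypothetical probe/IRQ paths, not part of dev.c):
 * registering a NAPI context with the default weight and scheduling it from
 * hard-IRQ context. struct example_adapter, example_setup_napi() and
 * example_rx_interrupt() are assumptions; netif_napi_add(), napi_enable(),
 * napi_schedule_prep() and __napi_schedule_irqoff() are existing kernel APIs.
 */
struct example_adapter {
	struct net_device *netdev;
	struct napi_struct napi;
};

static void example_setup_napi(struct example_adapter *ad,
			       int (*poll)(struct napi_struct *, int))
{
	netif_napi_add(ad->netdev, &ad->napi, poll, NAPI_POLL_WEIGHT);
	napi_enable(&ad->napi);
}

static void example_rx_interrupt(struct example_adapter *ad)
{
	/* Called with hard irqs masked, so the _irqoff variant above applies. */
	if (napi_schedule_prep(&ad->napi))
		__napi_schedule_irqoff(&ad->napi);
}
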
Eric Dumazet3b47d302014-11-06 21:09:44 -08004588void napi_disable(struct napi_struct *n)
4589{
4590 might_sleep();
4591 set_bit(NAPI_STATE_DISABLE, &n->state);
4592
4593 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4594 msleep(1);
4595
4596 hrtimer_cancel(&n->timer);
4597
4598 clear_bit(NAPI_STATE_DISABLE, &n->state);
4599}
4600EXPORT_SYMBOL(napi_disable);
4601
Herbert Xud565b0a2008-12-15 23:38:52 -08004602void netif_napi_del(struct napi_struct *napi)
4603{
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004604 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004605 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004606
Eric Dumazet289dccb2013-12-20 14:29:08 -08004607 kfree_skb_list(napi->gro_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004608 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004609 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004610}
4611EXPORT_SYMBOL(netif_napi_del);
4612
Herbert Xu726ce702014-12-21 07:16:21 +11004613static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4614{
4615 void *have;
4616 int work, weight;
4617
4618 list_del_init(&n->poll_list);
4619
4620 have = netpoll_poll_lock(n);
4621
4622 weight = n->weight;
4623
4624 /* This NAPI_STATE_SCHED test is for avoiding a race
4625 * with netpoll's poll_napi(). Only the entity which
4626 * obtains the lock and sees NAPI_STATE_SCHED set will
4627 * actually make the ->poll() call. Therefore we avoid
4628 * accidentally calling ->poll() when NAPI is not scheduled.
4629 */
4630 work = 0;
4631 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4632 work = n->poll(n, weight);
4633 trace_napi_poll(n);
4634 }
4635
4636 WARN_ON_ONCE(work > weight);
4637
4638 if (likely(work < weight))
4639 goto out_unlock;
4640
4641 /* Drivers must not modify the NAPI state if they
4642 * consume the entire weight. In such cases this code
4643 * still "owns" the NAPI instance and therefore can
4644 * move the instance around on the list at-will.
4645 */
4646 if (unlikely(napi_disable_pending(n))) {
4647 napi_complete(n);
4648 goto out_unlock;
4649 }
4650
4651 if (n->gro_list) {
4652 /* flush too old packets
4653 * If HZ < 1000, flush all packets.
4654 */
4655 napi_gro_flush(n, HZ >= 1000);
4656 }
4657
Herbert Xu001ce542014-12-21 07:16:22 +11004658 /* Some drivers may have called napi_schedule
4659 * prior to exhausting their budget.
4660 */
4661 if (unlikely(!list_empty(&n->poll_list))) {
4662 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
4663 n->dev ? n->dev->name : "backlog");
4664 goto out_unlock;
4665 }
4666
Herbert Xu726ce702014-12-21 07:16:21 +11004667 list_add_tail(&n->poll_list, repoll);
4668
4669out_unlock:
4670 netpoll_poll_unlock(have);
4671
4672 return work;
4673}
4674
Linus Torvalds1da177e2005-04-16 15:20:36 -07004675static void net_rx_action(struct softirq_action *h)
4676{
Christoph Lameter903ceff2014-08-17 12:30:35 -05004677 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004678 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004679 int budget = netdev_budget;
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004680 LIST_HEAD(list);
4681 LIST_HEAD(repoll);
Matt Mackall53fb95d2005-08-11 19:27:43 -07004682
Linus Torvalds1da177e2005-04-16 15:20:36 -07004683 local_irq_disable();
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004684 list_splice_init(&sd->poll_list, &list);
4685 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004686
Herbert Xuceb8d5b2014-12-21 07:16:25 +11004687 for (;;) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004688 struct napi_struct *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004689
Herbert Xuceb8d5b2014-12-21 07:16:25 +11004690 if (list_empty(&list)) {
4691 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
4692 return;
4693 break;
4694 }
4695
Herbert Xu6bd373e2014-12-21 07:16:24 +11004696 n = list_first_entry(&list, struct napi_struct, poll_list);
4697 budget -= napi_poll(n, &repoll);
4698
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004699 /* If softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004700		 * Allow this to run for 2 jiffies, which allows
4701 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004702 */
Herbert Xuceb8d5b2014-12-21 07:16:25 +11004703 if (unlikely(budget <= 0 ||
4704 time_after_eq(jiffies, time_limit))) {
4705 sd->time_squeeze++;
4706 break;
4707 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004708 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004709
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08004710 local_irq_disable();
4711
4712 list_splice_tail_init(&sd->poll_list, &list);
4713 list_splice_tail(&repoll, &list);
4714 list_splice(&list, &sd->poll_list);
4715 if (!list_empty(&sd->poll_list))
4716 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4717
Eric Dumazete326bed2010-04-22 00:22:45 -07004718 net_rps_action_and_irq_enable(sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719}
4720
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004721struct netdev_adjacent {
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004722 struct net_device *dev;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004723
4724 /* upper master flag, there can only be one master device per list */
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004725 bool master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004726
Veaceslav Falico5d261912013-08-28 23:25:05 +02004727 /* counter for the number of times this device was added to us */
4728 u16 ref_nr;
4729
Veaceslav Falico402dae92013-09-25 09:20:09 +02004730 /* private field for the users */
4731 void *private;
4732
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004733 struct list_head list;
4734 struct rcu_head rcu;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004735};
4736
Veaceslav Falico5d261912013-08-28 23:25:05 +02004737static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4738 struct net_device *adj_dev,
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004739 struct list_head *adj_list)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004740{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004741 struct netdev_adjacent *adj;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004742
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004743 list_for_each_entry(adj, adj_list, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004744 if (adj->dev == adj_dev)
4745 return adj;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004746 }
4747 return NULL;
4748}
4749
4750/**
4751 * netdev_has_upper_dev - Check if device is linked to an upper device
4752 * @dev: device
4753 * @upper_dev: upper device to check
4754 *
4755 * Find out if a device is linked to the specified upper device and return true
4756 * in case it is. Note that this checks only the immediate upper device,
4757 * not through a complete stack of devices. The caller must hold the RTNL lock.
4758 */
4759bool netdev_has_upper_dev(struct net_device *dev,
4760 struct net_device *upper_dev)
4761{
4762 ASSERT_RTNL();
4763
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004764 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004765}
4766EXPORT_SYMBOL(netdev_has_upper_dev);
4767
4768/**
4769 * netdev_has_any_upper_dev - Check if device is linked to some device
4770 * @dev: device
4771 *
4772 * Find out if a device is linked to an upper device and return true in case
4773 * it is. The caller must hold the RTNL lock.
4774 */
stephen hemminger1d143d92013-12-29 14:01:29 -08004775static bool netdev_has_any_upper_dev(struct net_device *dev)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004776{
4777 ASSERT_RTNL();
4778
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004779 return !list_empty(&dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004780}
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004781
4782/**
4783 * netdev_master_upper_dev_get - Get master upper device
4784 * @dev: device
4785 *
4786 * Find a master upper device and return pointer to it or NULL in case
4787 * it's not there. The caller must hold the RTNL lock.
4788 */
4789struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4790{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004791 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004792
4793 ASSERT_RTNL();
4794
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004795 if (list_empty(&dev->adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004796 return NULL;
4797
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004798 upper = list_first_entry(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004799 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004800 if (likely(upper->master))
4801 return upper->dev;
4802 return NULL;
4803}
4804EXPORT_SYMBOL(netdev_master_upper_dev_get);
4805
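/*
 * Illustrative sketch (hypothetical, not part of dev.c): a notifier or driver
 * checking, under RTNL, whether a candidate device sits above @dev and
 * whether it is @dev's master, using the two adjacency helpers above.
 */
static bool example_has_master(struct net_device *dev,
			       struct net_device *candidate)
{
	ASSERT_RTNL();

	return netdev_has_upper_dev(dev, candidate) &&
	       netdev_master_upper_dev_get(dev) == candidate;
}
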
Veaceslav Falicob6ccba42013-09-25 09:20:23 +02004806void *netdev_adjacent_get_private(struct list_head *adj_list)
4807{
4808 struct netdev_adjacent *adj;
4809
4810 adj = list_entry(adj_list, struct netdev_adjacent, list);
4811
4812 return adj->private;
4813}
4814EXPORT_SYMBOL(netdev_adjacent_get_private);
4815
Veaceslav Falico31088a12013-09-25 09:20:12 +02004816/**
Vlad Yasevich44a40852014-05-16 17:20:38 -04004817 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4818 * @dev: device
4819 * @iter: list_head ** of the current position
4820 *
4821 * Gets the next device from the dev's upper list, starting from iter
4822 * position. The caller must hold RCU read lock.
4823 */
4824struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4825 struct list_head **iter)
4826{
4827 struct netdev_adjacent *upper;
4828
4829 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4830
4831 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4832
4833 if (&upper->list == &dev->adj_list.upper)
4834 return NULL;
4835
4836 *iter = &upper->list;
4837
4838 return upper->dev;
4839}
4840EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
4841
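/*
 * Illustrative sketch (hypothetical, not part of dev.c): walking the
 * immediate upper devices with the RCU iterator above. The iterator starts
 * at the list head, and the whole walk must stay inside one RCU read-side
 * section.
 */
static unsigned int example_count_uppers(struct net_device *dev)
{
	struct list_head *iter = &dev->adj_list.upper;
	struct net_device *upper;
	unsigned int n = 0;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
		n++;
	rcu_read_unlock();

	return n;
}
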
4842/**
Veaceslav Falico31088a12013-09-25 09:20:12 +02004843 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
Veaceslav Falico48311f42013-08-28 23:25:07 +02004844 * @dev: device
4845 * @iter: list_head ** of the current position
4846 *
4847 * Gets the next device from the dev's upper list, starting from iter
4848 * position. The caller must hold RCU read lock.
4849 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004850struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4851 struct list_head **iter)
Veaceslav Falico48311f42013-08-28 23:25:07 +02004852{
4853 struct netdev_adjacent *upper;
4854
John Fastabend85328242013-11-26 06:33:52 +00004855 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
Veaceslav Falico48311f42013-08-28 23:25:07 +02004856
4857 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4858
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004859 if (&upper->list == &dev->all_adj_list.upper)
Veaceslav Falico48311f42013-08-28 23:25:07 +02004860 return NULL;
4861
4862 *iter = &upper->list;
4863
4864 return upper->dev;
4865}
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004866EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
Veaceslav Falico48311f42013-08-28 23:25:07 +02004867
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004868/**
Veaceslav Falico31088a12013-09-25 09:20:12 +02004869 * netdev_lower_get_next_private - Get the next ->private from the
4870 * lower neighbour list
4871 * @dev: device
4872 * @iter: list_head ** of the current position
4873 *
4874 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4875 * list, starting from iter position. The caller must hold either hold the
4876 * RTNL lock or its own locking that guarantees that the neighbour lower
4877 * list will remain unchainged.
4878 */
4879void *netdev_lower_get_next_private(struct net_device *dev,
4880 struct list_head **iter)
4881{
4882 struct netdev_adjacent *lower;
4883
4884 lower = list_entry(*iter, struct netdev_adjacent, list);
4885
4886 if (&lower->list == &dev->adj_list.lower)
4887 return NULL;
4888
Veaceslav Falico6859e7d2014-04-07 11:25:12 +02004889 *iter = lower->list.next;
Veaceslav Falico31088a12013-09-25 09:20:12 +02004890
4891 return lower->private;
4892}
4893EXPORT_SYMBOL(netdev_lower_get_next_private);
4894
4895/**
4896 * netdev_lower_get_next_private_rcu - Get the next ->private from the
4897 * lower neighbour list, RCU
4898 * variant
4899 * @dev: device
4900 * @iter: list_head ** of the current position
4901 *
4902 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4903 * list, starting from iter position. The caller must hold RCU read lock.
4904 */
4905void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4906 struct list_head **iter)
4907{
4908 struct netdev_adjacent *lower;
4909
4910 WARN_ON_ONCE(!rcu_read_lock_held());
4911
4912 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4913
4914 if (&lower->list == &dev->adj_list.lower)
4915 return NULL;
4916
Veaceslav Falico6859e7d2014-04-07 11:25:12 +02004917 *iter = &lower->list;
Veaceslav Falico31088a12013-09-25 09:20:12 +02004918
4919 return lower->private;
4920}
4921EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4922
4923/**
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04004924 * netdev_lower_get_next - Get the next device from the lower neighbour
4925 * list
4926 * @dev: device
4927 * @iter: list_head ** of the current position
4928 *
4929 * Gets the next netdev_adjacent from the dev's lower neighbour
4930 * list, starting from iter position. The caller must hold RTNL lock or
4931 * its own locking that guarantees that the neighbour lower
4932 * list will remain unchainged.
4933 */
4934void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
4935{
4936 struct netdev_adjacent *lower;
4937
4938 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
4939
4940 if (&lower->list == &dev->adj_list.lower)
4941 return NULL;
4942
4943 *iter = &lower->list;
4944
4945 return lower->dev;
4946}
4947EXPORT_SYMBOL(netdev_lower_get_next);
4948
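/*
 * Illustrative sketch (hypothetical, not part of dev.c): a master device
 * (e.g. a bonding-style driver) visiting each of its immediate lower devices
 * with the RTNL-protected iterator above. The callback is an assumption.
 */
static void example_for_each_lower(struct net_device *master,
				   void (*fn)(struct net_device *lower))
{
	struct list_head *iter = &master->adj_list.lower;
	struct net_device *lower;

	ASSERT_RTNL();
	while ((lower = netdev_lower_get_next(master, &iter)) != NULL)
		fn(lower);
}
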
4949/**
dingtianhonge001bfa2013-12-13 10:19:55 +08004950 * netdev_lower_get_first_private_rcu - Get the first ->private from the
4951 * lower neighbour list, RCU
4952 * variant
4953 * @dev: device
4954 *
4955 * Gets the first netdev_adjacent->private from the dev's lower neighbour
4956 * list. The caller must hold RCU read lock.
4957 */
4958void *netdev_lower_get_first_private_rcu(struct net_device *dev)
4959{
4960 struct netdev_adjacent *lower;
4961
4962 lower = list_first_or_null_rcu(&dev->adj_list.lower,
4963 struct netdev_adjacent, list);
4964 if (lower)
4965 return lower->private;
4966 return NULL;
4967}
4968EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
4969
4970/**
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004971 * netdev_master_upper_dev_get_rcu - Get master upper device
4972 * @dev: device
4973 *
4974 * Find a master upper device and return pointer to it or NULL in case
4975 * it's not there. The caller must hold the RCU read lock.
4976 */
4977struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4978{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004979 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004980
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004981 upper = list_first_or_null_rcu(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004982 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004983 if (upper && likely(upper->master))
4984 return upper->dev;
4985 return NULL;
4986}
4987EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4988
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05304989static int netdev_adjacent_sysfs_add(struct net_device *dev,
Veaceslav Falico3ee32702014-01-14 21:58:50 +01004990 struct net_device *adj_dev,
4991 struct list_head *dev_list)
4992{
4993 char linkname[IFNAMSIZ+7];
4994 sprintf(linkname, dev_list == &dev->adj_list.upper ?
4995 "upper_%s" : "lower_%s", adj_dev->name);
4996 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
4997 linkname);
4998}
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05304999static void netdev_adjacent_sysfs_del(struct net_device *dev,
Veaceslav Falico3ee32702014-01-14 21:58:50 +01005000 char *name,
5001 struct list_head *dev_list)
5002{
5003 char linkname[IFNAMSIZ+7];
5004 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5005 "upper_%s" : "lower_%s", name);
5006 sysfs_remove_link(&(dev->dev.kobj), linkname);
5007}
5008
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04005009static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
5010 struct net_device *adj_dev,
5011 struct list_head *dev_list)
5012{
5013 return (dev_list == &dev->adj_list.upper ||
5014 dev_list == &dev->adj_list.lower) &&
5015 net_eq(dev_net(dev), dev_net(adj_dev));
5016}
Veaceslav Falico3ee32702014-01-14 21:58:50 +01005017
Veaceslav Falico5d261912013-08-28 23:25:05 +02005018static int __netdev_adjacent_dev_insert(struct net_device *dev,
5019 struct net_device *adj_dev,
Veaceslav Falico7863c052013-09-25 09:20:06 +02005020 struct list_head *dev_list,
Veaceslav Falico402dae92013-09-25 09:20:09 +02005021 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005022{
5023 struct netdev_adjacent *adj;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005024 int ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005025
Veaceslav Falico7863c052013-09-25 09:20:06 +02005026 adj = __netdev_find_adj(dev, adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005027
5028 if (adj) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005029 adj->ref_nr++;
5030 return 0;
5031 }
5032
5033 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5034 if (!adj)
5035 return -ENOMEM;
5036
5037 adj->dev = adj_dev;
5038 adj->master = master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005039 adj->ref_nr = 1;
Veaceslav Falico402dae92013-09-25 09:20:09 +02005040 adj->private = private;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005041 dev_hold(adj_dev);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005042
5043 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5044 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005045
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04005046 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
Veaceslav Falico3ee32702014-01-14 21:58:50 +01005047 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02005048 if (ret)
5049 goto free_adj;
5050 }
5051
Veaceslav Falico7863c052013-09-25 09:20:06 +02005052 /* Ensure that master link is always the first item in list. */
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005053 if (master) {
5054 ret = sysfs_create_link(&(dev->dev.kobj),
5055 &(adj_dev->dev.kobj), "master");
5056 if (ret)
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02005057 goto remove_symlinks;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005058
Veaceslav Falico7863c052013-09-25 09:20:06 +02005059 list_add_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005060 } else {
Veaceslav Falico7863c052013-09-25 09:20:06 +02005061 list_add_tail_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005062 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02005063
5064 return 0;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005065
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02005066remove_symlinks:
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04005067 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
Veaceslav Falico3ee32702014-01-14 21:58:50 +01005068 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005069free_adj:
5070 kfree(adj);
Nikolay Aleksandrov974daef2013-10-23 15:28:56 +02005071 dev_put(adj_dev);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005072
5073 return ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005074}
5075
stephen hemminger1d143d92013-12-29 14:01:29 -08005076static void __netdev_adjacent_dev_remove(struct net_device *dev,
5077 struct net_device *adj_dev,
5078 struct list_head *dev_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005079{
5080 struct netdev_adjacent *adj;
5081
Veaceslav Falico7863c052013-09-25 09:20:06 +02005082 adj = __netdev_find_adj(dev, adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005083
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005084 if (!adj) {
5085 pr_err("tried to remove device %s from %s\n",
5086 dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005087 BUG();
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005088 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02005089
5090 if (adj->ref_nr > 1) {
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005091 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5092 adj->ref_nr-1);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005093 adj->ref_nr--;
5094 return;
5095 }
5096
Veaceslav Falico842d67a2013-09-25 09:20:31 +02005097 if (adj->master)
5098 sysfs_remove_link(&(dev->dev.kobj), "master");
5099
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04005100 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
Veaceslav Falico3ee32702014-01-14 21:58:50 +01005101 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02005102
Veaceslav Falico5d261912013-08-28 23:25:05 +02005103 list_del_rcu(&adj->list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005104 pr_debug("dev_put for %s, because link removed from %s to %s\n",
5105 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005106 dev_put(adj_dev);
5107 kfree_rcu(adj, rcu);
5108}
5109
stephen hemminger1d143d92013-12-29 14:01:29 -08005110static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5111 struct net_device *upper_dev,
5112 struct list_head *up_list,
5113 struct list_head *down_list,
5114 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005115{
5116 int ret;
5117
Veaceslav Falico402dae92013-09-25 09:20:09 +02005118 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5119 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005120 if (ret)
5121 return ret;
5122
Veaceslav Falico402dae92013-09-25 09:20:09 +02005123 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5124 false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005125 if (ret) {
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005126 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005127 return ret;
5128 }
5129
5130 return 0;
5131}
5132
stephen hemminger1d143d92013-12-29 14:01:29 -08005133static int __netdev_adjacent_dev_link(struct net_device *dev,
5134 struct net_device *upper_dev)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005135{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005136 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5137 &dev->all_adj_list.upper,
5138 &upper_dev->all_adj_list.lower,
Veaceslav Falico402dae92013-09-25 09:20:09 +02005139 NULL, false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005140}
5141
stephen hemminger1d143d92013-12-29 14:01:29 -08005142static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5143 struct net_device *upper_dev,
5144 struct list_head *up_list,
5145 struct list_head *down_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005146{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005147 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5148 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005149}
5150
stephen hemminger1d143d92013-12-29 14:01:29 -08005151static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5152 struct net_device *upper_dev)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005153{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005154 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5155 &dev->all_adj_list.upper,
5156 &upper_dev->all_adj_list.lower);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005157}
5158
stephen hemminger1d143d92013-12-29 14:01:29 -08005159static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5160 struct net_device *upper_dev,
5161 void *private, bool master)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005162{
5163 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5164
5165 if (ret)
5166 return ret;
5167
5168 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5169 &dev->adj_list.upper,
5170 &upper_dev->adj_list.lower,
Veaceslav Falico402dae92013-09-25 09:20:09 +02005171 private, master);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005172 if (ret) {
5173 __netdev_adjacent_dev_unlink(dev, upper_dev);
5174 return ret;
5175 }
5176
5177 return 0;
5178}
5179
stephen hemminger1d143d92013-12-29 14:01:29 -08005180static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5181 struct net_device *upper_dev)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005182{
5183 __netdev_adjacent_dev_unlink(dev, upper_dev);
5184 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5185 &dev->adj_list.upper,
5186 &upper_dev->adj_list.lower);
5187}
Veaceslav Falico5d261912013-08-28 23:25:05 +02005188
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005189static int __netdev_upper_dev_link(struct net_device *dev,
Veaceslav Falico402dae92013-09-25 09:20:09 +02005190 struct net_device *upper_dev, bool master,
5191 void *private)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005192{
Veaceslav Falico5d261912013-08-28 23:25:05 +02005193 struct netdev_adjacent *i, *j, *to_i, *to_j;
5194 int ret = 0;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005195
5196 ASSERT_RTNL();
5197
5198 if (dev == upper_dev)
5199 return -EBUSY;
5200
5201	/* To prevent loops, make sure dev is not already an upper device of upper_dev. */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005202 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005203 return -EBUSY;
5204
Vlad Yasevichd66bf7d2015-05-02 21:33:44 -04005205 if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005206 return -EEXIST;
5207
5208 if (master && netdev_master_upper_dev_get(dev))
5209 return -EBUSY;
5210
Veaceslav Falico402dae92013-09-25 09:20:09 +02005211 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
5212 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005213 if (ret)
5214 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005215
Veaceslav Falico5d261912013-08-28 23:25:05 +02005216 /* Now that we linked these devs, make all the upper_dev's
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005217	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
Veaceslav Falico5d261912013-08-28 23:25:05 +02005218	 * vice versa, and don't forget the devices themselves. All of these
5219 * links are non-neighbours.
5220 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005221 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5222 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5223 pr_debug("Interlinking %s with %s, non-neighbour\n",
5224 i->dev->name, j->dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005225 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5226 if (ret)
5227 goto rollback_mesh;
5228 }
5229 }
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005230
Veaceslav Falico5d261912013-08-28 23:25:05 +02005231 /* add dev to every upper_dev's upper device */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005232 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5233 pr_debug("linking %s's upper device %s with %s\n",
5234 upper_dev->name, i->dev->name, dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005235 ret = __netdev_adjacent_dev_link(dev, i->dev);
5236 if (ret)
5237 goto rollback_upper_mesh;
5238 }
5239
5240 /* add upper_dev to every dev's lower device */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005241 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5242 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5243 i->dev->name, upper_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005244 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5245 if (ret)
5246 goto rollback_lower_mesh;
5247 }
5248
Jiri Pirko42e52bf2013-05-25 04:12:10 +00005249 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005250 return 0;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005251
5252rollback_lower_mesh:
5253 to_i = i;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005254 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005255 if (i == to_i)
5256 break;
5257 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5258 }
5259
5260 i = NULL;
5261
5262rollback_upper_mesh:
5263 to_i = i;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005264 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005265 if (i == to_i)
5266 break;
5267 __netdev_adjacent_dev_unlink(dev, i->dev);
5268 }
5269
5270 i = j = NULL;
5271
5272rollback_mesh:
5273 to_i = i;
5274 to_j = j;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005275 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5276 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005277 if (i == to_i && j == to_j)
5278 break;
5279 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5280 }
5281 if (i == to_i)
5282 break;
5283 }
5284
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005285 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005286
5287 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005288}
5289
5290/**
5291 * netdev_upper_dev_link - Add a link to the upper device
5292 * @dev: device
5293 * @upper_dev: new upper device
5294 *
5295 * Adds a link to a device which is upper to this one. The caller must hold
5296 * the RTNL lock. On a failure a negative errno code is returned.
5297 * On success the reference counts are adjusted and the function
5298 * returns zero.
5299 */
5300int netdev_upper_dev_link(struct net_device *dev,
5301 struct net_device *upper_dev)
5302{
Veaceslav Falico402dae92013-09-25 09:20:09 +02005303 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005304}
5305EXPORT_SYMBOL(netdev_upper_dev_link);
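/*
 * Editor's sketch -- illustration only, not part of dev.c: how a stacking
 * driver (a VLAN-like upper device) might record its relationship to the
 * real lower device.  The example_* names are assumptions; assumes a
 * kernel-module context with <linux/netdevice.h> and <linux/rtnetlink.h>.
 */
static int example_stack_on(struct net_device *upper, struct net_device *lower)
{
	int err;

	ASSERT_RTNL();		/* netdev_upper_dev_link() requires the RTNL lock */

	/* lower gains upper in adj_list.upper, upper gains lower in adj_list.lower */
	err = netdev_upper_dev_link(lower, upper);
	if (err)
		return err;

	/* mirror the lower device's carrier/dormant state onto the upper one */
	netif_stacked_transfer_operstate(lower, upper);
	return 0;
}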
5306
5307/**
5308 * netdev_master_upper_dev_link - Add a master link to the upper device
5309 * @dev: device
5310 * @upper_dev: new upper device
5311 *
5312 * Adds a link to a device which is upper to this one. In this case, only
5313 * one master upper device can be linked, although other non-master devices
5314 * might be linked as well. The caller must hold the RTNL lock.
5315 * On a failure a negative errno code is returned. On success the reference
5316 * counts are adjusted and the function returns zero.
5317 */
5318int netdev_master_upper_dev_link(struct net_device *dev,
5319 struct net_device *upper_dev)
5320{
Veaceslav Falico402dae92013-09-25 09:20:09 +02005321 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005322}
5323EXPORT_SYMBOL(netdev_master_upper_dev_link);
5324
Veaceslav Falico402dae92013-09-25 09:20:09 +02005325int netdev_master_upper_dev_link_private(struct net_device *dev,
5326 struct net_device *upper_dev,
5327 void *private)
5328{
5329 return __netdev_upper_dev_link(dev, upper_dev, true, private);
5330}
5331EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
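/*
 * Editor's sketch -- illustration only, not part of dev.c: a bonding/team
 * style master enslaving a port and attaching per-port private data that
 * can later be fetched with netdev_lower_dev_get_private().  All example_*
 * names are assumptions.
 */
static int example_enslave(struct net_device *master_dev,
			   struct net_device *port_dev,
			   void *port_priv)
{
	ASSERT_RTNL();

	/* only one master upper device may be linked to port_dev at a time */
	return netdev_master_upper_dev_link_private(port_dev, master_dev,
						    port_priv);
}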
5332
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005333/**
5334 * netdev_upper_dev_unlink - Removes a link to upper device
5335 * @dev: device
5336 * @upper_dev: upper device to remove the link to
5337 *
5338 * Removes the link to a device which is upper to this one. The caller must hold
5339 * the RTNL lock.
5340 */
5341void netdev_upper_dev_unlink(struct net_device *dev,
5342 struct net_device *upper_dev)
5343{
Veaceslav Falico5d261912013-08-28 23:25:05 +02005344 struct netdev_adjacent *i, *j;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005345 ASSERT_RTNL();
5346
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005347 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005348
5349 /* Here is the tricky part. We must remove all dev's lower
5350 * devices from all upper_dev's upper devices and vice
5351 * versa, to maintain the graph relationship.
5352 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005353 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5354 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005355 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5356
5357	/* also remove the devices themselves from the lower/upper device
5358	 * lists
5359 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005360 list_for_each_entry(i, &dev->all_adj_list.lower, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005361 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5362
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005363 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005364 __netdev_adjacent_dev_unlink(dev, i->dev);
5365
Jiri Pirko42e52bf2013-05-25 04:12:10 +00005366 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005367}
5368EXPORT_SYMBOL(netdev_upper_dev_unlink);
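/*
 * Editor's sketch -- illustration only, not part of dev.c: tearing the
 * relationship from the example_enslave() sketch above back down when a
 * port leaves its master.  The example_* names are assumptions.
 */
static void example_release(struct net_device *master_dev,
			    struct net_device *port_dev)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(port_dev, master_dev);
}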
5369
Moni Shoua61bd3852015-02-03 16:48:29 +02005370/**
5371 * netdev_bonding_info_change - Dispatch event about slave change
5372 * @dev: device
Masanari Iida4a26e4532015-02-14 22:26:34 +09005373 * @bonding_info: info to dispatch
Moni Shoua61bd3852015-02-03 16:48:29 +02005374 *
5375 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5376 * The caller must hold the RTNL lock.
5377 */
5378void netdev_bonding_info_change(struct net_device *dev,
5379 struct netdev_bonding_info *bonding_info)
5380{
5381 struct netdev_notifier_bonding_info info;
5382
5383 memcpy(&info.bonding_info, bonding_info,
5384 sizeof(struct netdev_bonding_info));
5385 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
5386 &info.info);
5387}
5388EXPORT_SYMBOL(netdev_bonding_info_change);
5389
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08005390static void netdev_adjacent_add_links(struct net_device *dev)
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04005391{
5392 struct netdev_adjacent *iter;
5393
5394 struct net *net = dev_net(dev);
5395
5396 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5397 if (!net_eq(net,dev_net(iter->dev)))
5398 continue;
5399 netdev_adjacent_sysfs_add(iter->dev, dev,
5400 &iter->dev->adj_list.lower);
5401 netdev_adjacent_sysfs_add(dev, iter->dev,
5402 &dev->adj_list.upper);
5403 }
5404
5405 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5406 if (!net_eq(net,dev_net(iter->dev)))
5407 continue;
5408 netdev_adjacent_sysfs_add(iter->dev, dev,
5409 &iter->dev->adj_list.upper);
5410 netdev_adjacent_sysfs_add(dev, iter->dev,
5411 &dev->adj_list.lower);
5412 }
5413}
5414
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08005415static void netdev_adjacent_del_links(struct net_device *dev)
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04005416{
5417 struct netdev_adjacent *iter;
5418
5419 struct net *net = dev_net(dev);
5420
5421 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5422 if (!net_eq(net,dev_net(iter->dev)))
5423 continue;
5424 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5425 &iter->dev->adj_list.lower);
5426 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5427 &dev->adj_list.upper);
5428 }
5429
5430 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5431 if (!net_eq(net,dev_net(iter->dev)))
5432 continue;
5433 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5434 &iter->dev->adj_list.upper);
5435 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5436 &dev->adj_list.lower);
5437 }
5438}
5439
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005440void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
Veaceslav Falico402dae92013-09-25 09:20:09 +02005441{
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005442 struct netdev_adjacent *iter;
Veaceslav Falico402dae92013-09-25 09:20:09 +02005443
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04005444 struct net *net = dev_net(dev);
5445
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005446 list_for_each_entry(iter, &dev->adj_list.upper, list) {
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04005447 if (!net_eq(net,dev_net(iter->dev)))
5448 continue;
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005449 netdev_adjacent_sysfs_del(iter->dev, oldname,
5450 &iter->dev->adj_list.lower);
5451 netdev_adjacent_sysfs_add(iter->dev, dev,
5452 &iter->dev->adj_list.lower);
5453 }
Veaceslav Falico402dae92013-09-25 09:20:09 +02005454
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005455 list_for_each_entry(iter, &dev->adj_list.lower, list) {
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04005456 if (!net_eq(net,dev_net(iter->dev)))
5457 continue;
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005458 netdev_adjacent_sysfs_del(iter->dev, oldname,
5459 &iter->dev->adj_list.upper);
5460 netdev_adjacent_sysfs_add(iter->dev, dev,
5461 &iter->dev->adj_list.upper);
5462 }
Veaceslav Falico402dae92013-09-25 09:20:09 +02005463}
Veaceslav Falico402dae92013-09-25 09:20:09 +02005464
5465void *netdev_lower_dev_get_private(struct net_device *dev,
5466 struct net_device *lower_dev)
5467{
5468 struct netdev_adjacent *lower;
5469
5470 if (!lower_dev)
5471 return NULL;
5472 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
5473 if (!lower)
5474 return NULL;
5475
5476 return lower->private;
5477}
5478EXPORT_SYMBOL(netdev_lower_dev_get_private);
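/*
 * Editor's sketch -- illustration only, not part of dev.c: retrieving the
 * per-port private data attached in the example_enslave() sketch above.
 * "struct example_port" is an assumed driver-private type.
 */
struct example_port;

static struct example_port *example_port_get(struct net_device *master_dev,
					     struct net_device *port_dev)
{
	/* returns NULL if port_dev is not a lower device of master_dev */
	return netdev_lower_dev_get_private(master_dev, port_dev);
}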
5479
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04005480
5481int dev_get_nest_level(struct net_device *dev,
5482 bool (*type_check)(struct net_device *dev))
5483{
5484 struct net_device *lower = NULL;
5485 struct list_head *iter;
5486 int max_nest = -1;
5487 int nest;
5488
5489 ASSERT_RTNL();
5490
5491 netdev_for_each_lower_dev(dev, lower, iter) {
5492 nest = dev_get_nest_level(lower, type_check);
5493 if (max_nest < nest)
5494 max_nest = nest;
5495 }
5496
5497 if (type_check(dev))
5498 max_nest++;
5499
5500 return max_nest;
5501}
5502EXPORT_SYMBOL(dev_get_nest_level);
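/*
 * Editor's sketch -- illustration only, not part of dev.c: computing a
 * lockdep nesting depth for a driver whose devices can be stacked on top
 * of each other.  example_is_my_dev() and its IFF_MACVLAN test are merely
 * placeholder assumptions for "device belongs to this driver".
 */
static bool example_is_my_dev(struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;	/* placeholder predicate */
}

static int example_nest_level(struct net_device *lowerdev)
{
	/* depth of matching devices at or below lowerdev, for lock subclasses */
	return dev_get_nest_level(lowerdev, example_is_my_dev);
}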
5503
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005504static void dev_change_rx_flags(struct net_device *dev, int flags)
5505{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005506 const struct net_device_ops *ops = dev->netdev_ops;
5507
Vlad Yasevichd2615bf2013-11-19 20:47:15 -05005508 if (ops->ndo_change_rx_flags)
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005509 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005510}
5511
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005512static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
Patrick McHardy4417da62007-06-27 01:28:10 -07005513{
Eric Dumazetb536db92011-11-30 21:42:26 +00005514 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005515 kuid_t uid;
5516 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07005517
Patrick McHardy24023452007-07-14 18:51:31 -07005518 ASSERT_RTNL();
5519
Wang Chendad9b332008-06-18 01:48:28 -07005520 dev->flags |= IFF_PROMISC;
5521 dev->promiscuity += inc;
5522 if (dev->promiscuity == 0) {
5523 /*
5524 * Avoid overflow.
5525 * If inc causes overflow, untouch promisc and return error.
5526 */
5527 if (inc < 0)
5528 dev->flags &= ~IFF_PROMISC;
5529 else {
5530 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005531 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5532 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005533 return -EOVERFLOW;
5534 }
5535 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005536 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005537 pr_info("device %s %s promiscuous mode\n",
5538 dev->name,
5539 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11005540 if (audit_enabled) {
5541 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005542 audit_log(current->audit_context, GFP_ATOMIC,
5543 AUDIT_ANOM_PROMISCUOUS,
5544 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5545 dev->name, (dev->flags & IFF_PROMISC),
5546 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07005547 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005548 from_kuid(&init_user_ns, uid),
5549 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005550 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11005551 }
Patrick McHardy24023452007-07-14 18:51:31 -07005552
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005553 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07005554 }
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005555 if (notify)
5556 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
Wang Chendad9b332008-06-18 01:48:28 -07005557 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005558}
5559
Linus Torvalds1da177e2005-04-16 15:20:36 -07005560/**
5561 * dev_set_promiscuity - update promiscuity count on a device
5562 * @dev: device
5563 * @inc: modifier
5564 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07005565 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005566 * remains above zero the interface remains promiscuous. Once it hits zero
5567 * the device reverts to normal filtering operation. A negative inc
5568 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07005569 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005570 */
Wang Chendad9b332008-06-18 01:48:28 -07005571int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005572{
Eric Dumazetb536db92011-11-30 21:42:26 +00005573 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07005574 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005575
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005576 err = __dev_set_promiscuity(dev, inc, true);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07005577 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07005578 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07005579 if (dev->flags != old_flags)
5580 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07005581 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005582}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005583EXPORT_SYMBOL(dev_set_promiscuity);
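/*
 * Editor's sketch -- illustration only, not part of dev.c: a packet-tap
 * style user entering and leaving promiscuous mode.  Because this is a
 * counter, every +1 must eventually be balanced by a -1.  The example_*
 * name is an assumption.
 */
static int example_tap_promisc(struct net_device *dev, bool on)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, on ? 1 : -1);
	rtnl_unlock();
	return err;
}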
Linus Torvalds1da177e2005-04-16 15:20:36 -07005584
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005585static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005586{
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005587 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005588
Patrick McHardy24023452007-07-14 18:51:31 -07005589 ASSERT_RTNL();
5590
Linus Torvalds1da177e2005-04-16 15:20:36 -07005591 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07005592 dev->allmulti += inc;
5593 if (dev->allmulti == 0) {
5594 /*
5595 * Avoid overflow.
5596 * If inc causes overflow, untouch allmulti and return error.
5597 */
5598 if (inc < 0)
5599 dev->flags &= ~IFF_ALLMULTI;
5600 else {
5601 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005602 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5603 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005604 return -EOVERFLOW;
5605 }
5606 }
Patrick McHardy24023452007-07-14 18:51:31 -07005607 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005608 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07005609 dev_set_rx_mode(dev);
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005610 if (notify)
5611 __dev_notify_flags(dev, old_flags,
5612 dev->gflags ^ old_gflags);
Patrick McHardy24023452007-07-14 18:51:31 -07005613 }
Wang Chendad9b332008-06-18 01:48:28 -07005614 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005615}
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005616
5617/**
5618 * dev_set_allmulti - update allmulti count on a device
5619 * @dev: device
5620 * @inc: modifier
5621 *
5622 * Add or remove reception of all multicast frames to a device. While the
5623 * count in the device remains above zero the interface remains listening
5624 * to all multicast frames. Once it hits zero the device reverts to normal
5625 * filtering operation. A negative @inc value is used to drop the counter
5626 * when releasing a resource needing all multicasts.
5627 * Return 0 if successful or a negative errno code on error.
5628 */
5629
5630int dev_set_allmulti(struct net_device *dev, int inc)
5631{
5632 return __dev_set_allmulti(dev, inc, true);
5633}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005634EXPORT_SYMBOL(dev_set_allmulti);
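/*
 * Editor's sketch -- illustration only, not part of dev.c: a multicast
 * routing user asking for all multicast frames while it is active.  As
 * with dev_set_promiscuity(), the +1 must later be balanced by a -1.
 * The example_* name is an assumption.
 */
static int example_mroute_start(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev_set_allmulti(dev, 1);	/* undo later with dev_set_allmulti(dev, -1) */
}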
Patrick McHardy4417da62007-06-27 01:28:10 -07005635
5636/*
5637 * Upload unicast and multicast address lists to device and
5638 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08005639 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07005640 * are present.
5641 */
5642void __dev_set_rx_mode(struct net_device *dev)
5643{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005644 const struct net_device_ops *ops = dev->netdev_ops;
5645
Patrick McHardy4417da62007-06-27 01:28:10 -07005646 /* dev_open will call this function so the list will stay sane. */
5647 if (!(dev->flags&IFF_UP))
5648 return;
5649
5650 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09005651 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07005652
Jiri Pirko01789342011-08-16 06:29:00 +00005653 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005654	 /* Unicast address changes may only happen under the rtnl,
5655 * therefore calling __dev_set_promiscuity here is safe.
5656 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005657 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005658 __dev_set_promiscuity(dev, 1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07005659 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005660 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005661 __dev_set_promiscuity(dev, -1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07005662 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07005663 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005664 }
Jiri Pirko01789342011-08-16 06:29:00 +00005665
5666 if (ops->ndo_set_rx_mode)
5667 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005668}
5669
5670void dev_set_rx_mode(struct net_device *dev)
5671{
David S. Millerb9e40852008-07-15 00:15:08 -07005672 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005673 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07005674 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005675}
5676
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005677/**
5678 * dev_get_flags - get flags reported to userspace
5679 * @dev: device
5680 *
5681 * Get the combination of flag bits exported through APIs to userspace.
5682 */
Eric Dumazet95c96172012-04-15 05:58:06 +00005683unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005684{
Eric Dumazet95c96172012-04-15 05:58:06 +00005685 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005686
5687 flags = (dev->flags & ~(IFF_PROMISC |
5688 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08005689 IFF_RUNNING |
5690 IFF_LOWER_UP |
5691 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07005692 (dev->gflags & (IFF_PROMISC |
5693 IFF_ALLMULTI));
5694
Stefan Rompfb00055a2006-03-20 17:09:11 -08005695 if (netif_running(dev)) {
5696 if (netif_oper_up(dev))
5697 flags |= IFF_RUNNING;
5698 if (netif_carrier_ok(dev))
5699 flags |= IFF_LOWER_UP;
5700 if (netif_dormant(dev))
5701 flags |= IFF_DORMANT;
5702 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005703
5704 return flags;
5705}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005706EXPORT_SYMBOL(dev_get_flags);
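/*
 * Editor's sketch -- illustration only, not part of dev.c: reading the
 * userspace view of the flags to decide whether a link is administratively
 * up and operationally running.  The example_* name is an assumption.
 */
static bool example_link_usable(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return (flags & IFF_UP) && (flags & IFF_RUNNING);
}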
Linus Torvalds1da177e2005-04-16 15:20:36 -07005707
Patrick McHardybd380812010-02-26 06:34:53 +00005708int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005709{
Eric Dumazetb536db92011-11-30 21:42:26 +00005710 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005711 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005712
Patrick McHardy24023452007-07-14 18:51:31 -07005713 ASSERT_RTNL();
5714
Linus Torvalds1da177e2005-04-16 15:20:36 -07005715 /*
5716 * Set the flags on our device.
5717 */
5718
5719 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5720 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5721 IFF_AUTOMEDIA)) |
5722 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5723 IFF_ALLMULTI));
5724
5725 /*
5726 * Load in the correct multicast list now the flags have changed.
5727 */
5728
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005729 if ((old_flags ^ flags) & IFF_MULTICAST)
5730 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07005731
Patrick McHardy4417da62007-06-27 01:28:10 -07005732 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005733
5734 /*
5735 *	Have we downed the interface? We handle IFF_UP ourselves
5736 * according to user attempts to set it, rather than blindly
5737 * setting it.
5738 */
5739
5740 ret = 0;
Peter Pan(潘卫平)d215d102014-06-16 21:57:22 +08005741 if ((old_flags ^ flags) & IFF_UP)
Patrick McHardybd380812010-02-26 06:34:53 +00005742 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005743
Linus Torvalds1da177e2005-04-16 15:20:36 -07005744 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005745 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005746 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005747
Linus Torvalds1da177e2005-04-16 15:20:36 -07005748 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005749
5750 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5751 if (dev->flags != old_flags)
5752 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005753 }
5754
5755 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5756	   is important. Some (broken) drivers set IFF_PROMISC when
5757	   IFF_ALLMULTI is requested, without asking us and without reporting it.
5758 */
5759 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005760 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5761
Linus Torvalds1da177e2005-04-16 15:20:36 -07005762 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005763 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005764 }
5765
Patrick McHardybd380812010-02-26 06:34:53 +00005766 return ret;
5767}
5768
Nicolas Dichtela528c212013-09-25 12:02:44 +02005769void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5770 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00005771{
5772 unsigned int changes = dev->flags ^ old_flags;
5773
Nicolas Dichtela528c212013-09-25 12:02:44 +02005774 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005775 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02005776
Patrick McHardybd380812010-02-26 06:34:53 +00005777 if (changes & IFF_UP) {
5778 if (dev->flags & IFF_UP)
5779 call_netdevice_notifiers(NETDEV_UP, dev);
5780 else
5781 call_netdevice_notifiers(NETDEV_DOWN, dev);
5782 }
5783
5784 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00005785 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5786 struct netdev_notifier_change_info change_info;
5787
5788 change_info.flags_changed = changes;
5789 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5790 &change_info.info);
5791 }
Patrick McHardybd380812010-02-26 06:34:53 +00005792}
5793
5794/**
5795 * dev_change_flags - change device settings
5796 * @dev: device
5797 * @flags: device state flags
5798 *
5799 * Change settings on a device based on the given state flags. The flags are
5800 * in the userspace exported format.
5801 */
Eric Dumazetb536db92011-11-30 21:42:26 +00005802int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00005803{
Eric Dumazetb536db92011-11-30 21:42:26 +00005804 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005805 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00005806
5807 ret = __dev_change_flags(dev, flags);
5808 if (ret < 0)
5809 return ret;
5810
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005811 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02005812 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005813 return ret;
5814}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005815EXPORT_SYMBOL(dev_change_flags);
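/*
 * Editor's sketch -- illustration only, not part of dev.c: bringing an
 * interface up the way an ioctl/netlink request would, by feeding the
 * userspace-format flags back through dev_change_flags() under RTNL.
 * The example_* name is an assumption.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}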
Linus Torvalds1da177e2005-04-16 15:20:36 -07005816
Veaceslav Falico2315dc92014-01-10 16:56:25 +01005817static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5818{
5819 const struct net_device_ops *ops = dev->netdev_ops;
5820
5821 if (ops->ndo_change_mtu)
5822 return ops->ndo_change_mtu(dev, new_mtu);
5823
5824 dev->mtu = new_mtu;
5825 return 0;
5826}
5827
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005828/**
5829 * dev_set_mtu - Change maximum transfer unit
5830 * @dev: device
5831 * @new_mtu: new transfer unit
5832 *
5833 * Change the maximum transfer size of the network device.
5834 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005835int dev_set_mtu(struct net_device *dev, int new_mtu)
5836{
Veaceslav Falico2315dc92014-01-10 16:56:25 +01005837 int err, orig_mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005838
5839 if (new_mtu == dev->mtu)
5840 return 0;
5841
5842 /* MTU must be positive. */
5843 if (new_mtu < 0)
5844 return -EINVAL;
5845
5846 if (!netif_device_present(dev))
5847 return -ENODEV;
5848
Veaceslav Falico1d486bf2014-01-16 00:02:18 +01005849 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5850 err = notifier_to_errno(err);
5851 if (err)
5852 return err;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005853
Veaceslav Falico2315dc92014-01-10 16:56:25 +01005854 orig_mtu = dev->mtu;
5855 err = __dev_set_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005856
Veaceslav Falico2315dc92014-01-10 16:56:25 +01005857 if (!err) {
5858 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5859 err = notifier_to_errno(err);
5860 if (err) {
5861 /* setting mtu back and notifying everyone again,
5862 * so that they have a chance to revert changes.
5863 */
5864 __dev_set_mtu(dev, orig_mtu);
5865 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5866 }
5867 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005868 return err;
5869}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005870EXPORT_SYMBOL(dev_set_mtu);
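/*
 * Editor's sketch -- illustration only, not part of dev.c: enabling jumbo
 * frames on a device.  The 9000-byte value and example_* name are
 * assumptions; note that the PRECHANGEMTU/CHANGEMTU notifiers may veto the
 * change or force a rollback.
 */
static int example_enable_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	return err;
}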
Linus Torvalds1da177e2005-04-16 15:20:36 -07005871
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005872/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005873 * dev_set_group - Change group this device belongs to
5874 * @dev: device
5875 * @new_group: group this device should belong to
5876 */
5877void dev_set_group(struct net_device *dev, int new_group)
5878{
5879 dev->group = new_group;
5880}
5881EXPORT_SYMBOL(dev_set_group);
5882
5883/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005884 * dev_set_mac_address - Change Media Access Control Address
5885 * @dev: device
5886 * @sa: new address
5887 *
5888 * Change the hardware (MAC) address of the device
5889 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005890int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5891{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005892 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005893 int err;
5894
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005895 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005896 return -EOPNOTSUPP;
5897 if (sa->sa_family != dev->type)
5898 return -EINVAL;
5899 if (!netif_device_present(dev))
5900 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005901 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00005902 if (err)
5903 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00005904 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00005905 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005906 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00005907 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005908}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005909EXPORT_SYMBOL(dev_set_mac_address);
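/*
 * Editor's sketch -- illustration only, not part of dev.c: programming a
 * new hardware address.  sa_family must match dev->type and addr must hold
 * at least dev->addr_len bytes.  The example_* name is an assumption.
 */
static int example_set_mac(struct net_device *dev, const u8 *addr)
{
	struct sockaddr sa;
	int err;

	if (dev->addr_len > sizeof(sa.sa_data))
		return -EINVAL;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, addr, dev->addr_len);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}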
Linus Torvalds1da177e2005-04-16 15:20:36 -07005910
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005911/**
5912 * dev_change_carrier - Change device carrier
5913 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00005914 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005915 *
5916 * Change device carrier
5917 */
5918int dev_change_carrier(struct net_device *dev, bool new_carrier)
5919{
5920 const struct net_device_ops *ops = dev->netdev_ops;
5921
5922 if (!ops->ndo_change_carrier)
5923 return -EOPNOTSUPP;
5924 if (!netif_device_present(dev))
5925 return -ENODEV;
5926 return ops->ndo_change_carrier(dev, new_carrier);
5927}
5928EXPORT_SYMBOL(dev_change_carrier);
5929
Linus Torvalds1da177e2005-04-16 15:20:36 -07005930/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02005931 * dev_get_phys_port_id - Get device physical port ID
5932 * @dev: device
5933 * @ppid: port ID
5934 *
5935 * Get device physical port ID
5936 */
5937int dev_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01005938 struct netdev_phys_item_id *ppid)
Jiri Pirko66b52b02013-07-29 18:16:49 +02005939{
5940 const struct net_device_ops *ops = dev->netdev_ops;
5941
5942 if (!ops->ndo_get_phys_port_id)
5943 return -EOPNOTSUPP;
5944 return ops->ndo_get_phys_port_id(dev, ppid);
5945}
5946EXPORT_SYMBOL(dev_get_phys_port_id);
5947
5948/**
David Aherndb24a902015-03-17 20:23:15 -06005949 * dev_get_phys_port_name - Get device physical port name
5950 * @dev: device
5951 * @name: port name
5952 *
5953 * Get device physical port name
5954 */
5955int dev_get_phys_port_name(struct net_device *dev,
5956 char *name, size_t len)
5957{
5958 const struct net_device_ops *ops = dev->netdev_ops;
5959
5960 if (!ops->ndo_get_phys_port_name)
5961 return -EOPNOTSUPP;
5962 return ops->ndo_get_phys_port_name(dev, name, len);
5963}
5964EXPORT_SYMBOL(dev_get_phys_port_name);
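/*
 * Editor's sketch -- illustration only, not part of dev.c: querying the
 * physical port identity much as rtnetlink does for IFLA_PHYS_PORT_ID and
 * IFLA_PHYS_PORT_NAME.  Both helpers return -EOPNOTSUPP when the driver
 * does not implement the corresponding ndo.  The example_* name is an
 * assumption.
 */
static void example_dump_phys_port(struct net_device *dev)
{
	struct netdev_phys_item_id ppid;
	char name[IFNAMSIZ];

	if (!dev_get_phys_port_id(dev, &ppid))
		netdev_info(dev, "phys port id: %*phN\n", ppid.id_len, ppid.id);

	if (!dev_get_phys_port_name(dev, name, sizeof(name)))
		netdev_info(dev, "phys port name: %s\n", name);
}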
5965
5966/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005967 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005968 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005969 *
5970 * Returns a suitable unique value for a new device interface
5971 * number. The caller must hold the rtnl semaphore or the
5972 * dev_base_lock to be sure it remains unique.
5973 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005974static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005975{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005976 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005977 for (;;) {
5978 if (++ifindex <= 0)
5979 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005980 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005981 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005982 }
5983}
5984
Linus Torvalds1da177e2005-04-16 15:20:36 -07005985/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005986static LIST_HEAD(net_todo_list);
Cong Wang200b9162014-05-12 15:11:20 -07005987DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005988
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005989static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005990{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005991 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07005992 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005993}
5994
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005995static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005996{
Krishna Kumare93737b2009-12-08 22:26:02 +00005997 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07005998 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005999
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006000 BUG_ON(dev_boot_phase);
6001 ASSERT_RTNL();
6002
Krishna Kumare93737b2009-12-08 22:26:02 +00006003 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006004		/* Some devices call us without having registered,
Krishna Kumare93737b2009-12-08 22:26:02 +00006005		 * just to unwind their initialization. Remove those
6006		 * devices and proceed with the remaining ones.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006007 */
6008 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006009 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6010 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006011
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006012 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00006013 list_del(&dev->unreg_list);
6014 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006015 }
Eric Dumazet449f4542011-05-19 12:24:16 +00006016 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006017 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00006018 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006019
Octavian Purdila44345722010-12-13 12:44:07 +00006020 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006021 list_for_each_entry(dev, head, unreg_list)
6022 list_add_tail(&dev->close_list, &close_head);
David S. Miller99c4a262015-03-18 22:52:33 -04006023 dev_close_many(&close_head, true);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006024
Octavian Purdila44345722010-12-13 12:44:07 +00006025 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006026 /* And unlink it from device chain. */
6027 unlist_netdevice(dev);
6028
6029 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006030 }
6031
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006032 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006033
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006034 list_for_each_entry(dev, head, unreg_list) {
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006035 struct sk_buff *skb = NULL;
6036
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006037 /* Shutdown queueing discipline. */
6038 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006039
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006040
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006041		/* Notify protocols that we are about to destroy
6042		   this device. They should clean up all of their state.
6043 */
6044 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6045
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006046 if (!dev->rtnl_link_ops ||
6047 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6048 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6049 GFP_KERNEL);
6050
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006051 /*
6052 * Flush the unicast and multicast chains
6053 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006054 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006055 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006056
6057 if (dev->netdev_ops->ndo_uninit)
6058 dev->netdev_ops->ndo_uninit(dev);
6059
Mahesh Bandewar395eea62014-12-03 13:46:24 -08006060 if (skb)
6061 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
Roopa Prabhu56bfa7e2014-05-01 11:40:30 -07006062
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006063 /* Notifier chain MUST detach us all upper devices. */
6064 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006065
6066 /* Remove entries from kobject tree */
6067 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00006068#ifdef CONFIG_XPS
6069 /* Remove XPS queueing entries */
6070 netif_reset_xps_queues_gt(dev, 0);
6071#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006072 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006073
Eric W. Biederman850a5452011-10-13 22:25:23 +00006074 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006075
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00006076 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006077 dev_put(dev);
6078}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006079
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006080static void rollback_registered(struct net_device *dev)
6081{
6082 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006083
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006084 list_add(&dev->unreg_list, &single);
6085 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00006086 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006087}
6088
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006089static netdev_features_t netdev_fix_features(struct net_device *dev,
6090 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07006091{
Michał Mirosław57422dc2011-01-22 12:14:12 +00006092 /* Fix illegal checksum combinations */
6093 if ((features & NETIF_F_HW_CSUM) &&
6094 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006095 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00006096 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6097 }
6098
Herbert Xub63365a2008-10-23 01:11:29 -07006099 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00006100 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006101 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00006102 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07006103 }
6104
Pravin B Shelarec5f0612013-03-07 09:28:01 +00006105 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6106 !(features & NETIF_F_IP_CSUM)) {
6107 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6108 features &= ~NETIF_F_TSO;
6109 features &= ~NETIF_F_TSO_ECN;
6110 }
6111
6112 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6113 !(features & NETIF_F_IPV6_CSUM)) {
6114 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6115 features &= ~NETIF_F_TSO6;
6116 }
6117
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00006118 /* TSO ECN requires that TSO is present as well. */
6119 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6120 features &= ~NETIF_F_TSO_ECN;
6121
Michał Mirosław212b5732011-02-15 16:59:16 +00006122 /* Software GSO depends on SG. */
6123 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006124 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00006125 features &= ~NETIF_F_GSO;
6126 }
6127
Michał Mirosławacd11302011-01-24 15:45:15 -08006128 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07006129 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00006130 /* maybe split UFO into V4 and V6? */
6131 if (!((features & NETIF_F_GEN_CSUM) ||
6132 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
6133 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006134 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08006135 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07006136 features &= ~NETIF_F_UFO;
6137 }
6138
6139 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006140 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08006141 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07006142 features &= ~NETIF_F_UFO;
6143 }
6144 }
6145
Jiri Pirkod0290212014-04-02 23:09:31 +02006146#ifdef CONFIG_NET_RX_BUSY_POLL
6147 if (dev->netdev_ops->ndo_busy_poll)
6148 features |= NETIF_F_BUSY_POLL;
6149 else
6150#endif
6151 features &= ~NETIF_F_BUSY_POLL;
6152
Herbert Xub63365a2008-10-23 01:11:29 -07006153 return features;
6154}
Herbert Xub63365a2008-10-23 01:11:29 -07006155
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006156int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00006157{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006158 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00006159 int err = 0;
6160
Michał Mirosław87267482011-04-12 09:56:38 +00006161 ASSERT_RTNL();
6162
Michał Mirosław5455c692011-02-15 16:59:17 +00006163 features = netdev_get_wanted_features(dev);
6164
6165 if (dev->netdev_ops->ndo_fix_features)
6166 features = dev->netdev_ops->ndo_fix_features(dev, features);
6167
6168 /* driver might be less strict about feature dependencies */
6169 features = netdev_fix_features(dev, features);
6170
6171 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006172 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00006173
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006174 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6175 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00006176
6177 if (dev->netdev_ops->ndo_set_features)
6178 err = dev->netdev_ops->ndo_set_features(dev, features);
6179
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006180 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00006181 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006182 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6183 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006184 return -1;
6185 }
6186
6187 if (!err)
6188 dev->features = features;
6189
6190 return 1;
6191}
6192
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006193/**
6194 * netdev_update_features - recalculate device features
6195 * @dev: the device to check
6196 *
6197 * Recalculate dev->features set and send notifications if it
6198 * has changed. Should be called after driver- or hardware-dependent
6199 * conditions that influence the feature set might have changed.
6200 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006201void netdev_update_features(struct net_device *dev)
6202{
6203 if (__netdev_update_features(dev))
6204 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00006205}
6206EXPORT_SYMBOL(netdev_update_features);
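/*
 * Editor's sketch -- illustration only, not part of dev.c: a driver
 * reacting to a configuration change that invalidates an offload.  It
 * adjusts its advertised hw_features and asks the core to recompute
 * dev->features.  The example_* name and the choice of NETIF_F_IP_CSUM
 * are assumptions.
 */
static void example_offloads_changed(struct net_device *dev, bool csum_ok)
{
	ASSERT_RTNL();

	if (csum_ok)
		dev->hw_features |= NETIF_F_IP_CSUM;
	else
		dev->hw_features &= ~NETIF_F_IP_CSUM;

	/* recompute dev->features and emit NETDEV_FEAT_CHANGE if it changed */
	netdev_update_features(dev);
}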
6207
Linus Torvalds1da177e2005-04-16 15:20:36 -07006208/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006209 * netdev_change_features - recalculate device features
6210 * @dev: the device to check
6211 *
6212 * Recalculate dev->features set and send notifications even
6213 * if they have not changed. Should be called instead of
6214 * netdev_update_features() if dev->vlan_features might also
6215 * have changed, so that the changes are propagated to stacked
6216 * VLAN devices.
6217 */
6218void netdev_change_features(struct net_device *dev)
6219{
6220 __netdev_update_features(dev);
6221 netdev_features_change(dev);
6222}
6223EXPORT_SYMBOL(netdev_change_features);
6224
6225/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006226 * netif_stacked_transfer_operstate - transfer operstate
6227 * @rootdev: the root or lower level device to transfer state from
6228 * @dev: the device to transfer operstate to
6229 *
6230 * Transfer operational state from root to device. This is normally
6231 * called when a stacking relationship exists between the root
6232 * device and the device(a leaf device).
6233 */
6234void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6235 struct net_device *dev)
6236{
6237 if (rootdev->operstate == IF_OPER_DORMANT)
6238 netif_dormant_on(dev);
6239 else
6240 netif_dormant_off(dev);
6241
6242 if (netif_carrier_ok(rootdev)) {
6243 if (!netif_carrier_ok(dev))
6244 netif_carrier_on(dev);
6245 } else {
6246 if (netif_carrier_ok(dev))
6247 netif_carrier_off(dev);
6248 }
6249}
6250EXPORT_SYMBOL(netif_stacked_transfer_operstate);
6251
Michael Daltona953be52014-01-16 22:23:28 -08006252#ifdef CONFIG_SYSFS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006253static int netif_alloc_rx_queues(struct net_device *dev)
6254{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006255 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00006256 struct netdev_rx_queue *rx;
Pankaj Gupta10595902015-01-12 11:41:28 +05306257 size_t sz = count * sizeof(*rx);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006258
Tom Herbertbd25fa72010-10-18 18:00:16 +00006259 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006260
Pankaj Gupta10595902015-01-12 11:41:28 +05306261 rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6262 if (!rx) {
6263 rx = vzalloc(sz);
6264 if (!rx)
6265 return -ENOMEM;
6266 }
Tom Herbertbd25fa72010-10-18 18:00:16 +00006267 dev->_rx = rx;
6268
Tom Herbertbd25fa72010-10-18 18:00:16 +00006269 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00006270 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006271 return 0;
6272}
Tom Herbertbf264142010-11-26 08:36:09 +00006273#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006274
Changli Gaoaa942102010-12-04 02:31:41 +00006275static void netdev_init_one_queue(struct net_device *dev,
6276 struct netdev_queue *queue, void *_unused)
6277{
6278 /* Initialize queue lock */
6279 spin_lock_init(&queue->_xmit_lock);
6280 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6281 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00006282 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00006283 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00006284#ifdef CONFIG_BQL
6285 dql_init(&queue->dql, HZ);
6286#endif
Changli Gaoaa942102010-12-04 02:31:41 +00006287}
6288
Eric Dumazet60877a32013-06-20 01:15:51 -07006289static void netif_free_tx_queues(struct net_device *dev)
6290{
WANG Cong4cb28972014-06-02 15:55:22 -07006291 kvfree(dev->_tx);
Eric Dumazet60877a32013-06-20 01:15:51 -07006292}
6293
Tom Herberte6484932010-10-18 18:04:39 +00006294static int netif_alloc_netdev_queues(struct net_device *dev)
6295{
6296 unsigned int count = dev->num_tx_queues;
6297 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07006298 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00006299
Eric Dumazet60877a32013-06-20 01:15:51 -07006300 BUG_ON(count < 1 || count > 0xffff);
Tom Herberte6484932010-10-18 18:04:39 +00006301
Eric Dumazet60877a32013-06-20 01:15:51 -07006302 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6303 if (!tx) {
6304 tx = vzalloc(sz);
6305 if (!tx)
6306 return -ENOMEM;
6307 }
Tom Herberte6484932010-10-18 18:04:39 +00006308 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00006309
Tom Herberte6484932010-10-18 18:04:39 +00006310 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6311 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00006312
6313 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00006314}
6315
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006316/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006317 * register_netdevice - register a network device
6318 * @dev: device to register
6319 *
6320 * Take a completed network device structure and add it to the kernel
6321 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6322 * chain. 0 is returned on success. A negative errno code is returned
6323 * on a failure to set up the device, or if the name is a duplicate.
6324 *
6325 * Callers must hold the rtnl semaphore. You may want
6326 * register_netdev() instead of this.
6327 *
6328 * BUGS:
6329 * The locking appears insufficient to guarantee two parallel registers
6330 * will not get the same name.
6331 */
6332
6333int register_netdevice(struct net_device *dev)
6334{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006335 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006336 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006337
6338 BUG_ON(dev_boot_phase);
6339 ASSERT_RTNL();
6340
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006341 might_sleep();
6342
Linus Torvalds1da177e2005-04-16 15:20:36 -07006343 /* When net_device's are persistent, this will be fatal. */
6344 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006345 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006346
David S. Millerf1f28aa2008-07-15 00:08:33 -07006347 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07006348 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006349
Gao feng828de4f2012-09-13 20:58:27 +00006350 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00006351 if (ret < 0)
6352 goto out;
6353
Linus Torvalds1da177e2005-04-16 15:20:36 -07006354 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006355 if (dev->netdev_ops->ndo_init) {
6356 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006357 if (ret) {
6358 if (ret > 0)
6359 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08006360 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006361 }
6362 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006363
Patrick McHardyf6469682013-04-19 02:04:27 +00006364 if (((dev->hw_features | dev->features) &
6365 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00006366 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6367 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6368 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6369 ret = -EINVAL;
6370 goto err_uninit;
6371 }
6372
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00006373 ret = -EBUSY;
6374 if (!dev->ifindex)
6375 dev->ifindex = dev_new_index(net);
6376 else if (__dev_get_by_index(net, dev->ifindex))
6377 goto err_uninit;
6378
Michał Mirosław5455c692011-02-15 16:59:17 +00006379 /* Transfer changeable features to wanted_features and enable
6380 * software offloads (GSO and GRO).
6381 */
6382 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00006383 dev->features |= NETIF_F_SOFT_FEATURES;
6384 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006385
Michał Mirosław34324dc2011-11-15 15:29:55 +00006386 if (!(dev->flags & IFF_LOOPBACK)) {
6387 dev->hw_features |= NETIF_F_NOCACHE_COPY;
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006388 }
6389
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006390 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00006391 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006392 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00006393
Pravin B Shelaree579672013-03-07 09:28:08 +00006394 /* Make NETIF_F_SG inheritable to tunnel devices.
6395 */
6396 dev->hw_enc_features |= NETIF_F_SG;
6397
Simon Horman0d89d202013-05-23 21:02:52 +00006398 /* Make NETIF_F_SG inheritable to MPLS.
6399 */
6400 dev->mpls_features |= NETIF_F_SG;
6401
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00006402 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6403 ret = notifier_to_errno(ret);
6404 if (ret)
6405 goto err_uninit;
6406
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006407 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006408 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006409 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006410 dev->reg_state = NETREG_REGISTERED;
6411
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006412 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00006413
Linus Torvalds1da177e2005-04-16 15:20:36 -07006414 /*
6415 * Default initial state at registry is that the
6416 * device is present.
6417 */
6418
6419 set_bit(__LINK_STATE_PRESENT, &dev->state);
6420
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01006421 linkwatch_init_dev(dev);
6422
Linus Torvalds1da177e2005-04-16 15:20:36 -07006423 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006424 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006425 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006426 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006427
Jiri Pirko948b3372013-01-08 01:38:25 +00006428	/* If the device has a permanent device address, the driver should
 6429	 * set dev_addr, and addr_assign_type should be left at
6430 * NET_ADDR_PERM (default value).
6431 */
6432 if (dev->addr_assign_type == NET_ADDR_PERM)
6433 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6434
Linus Torvalds1da177e2005-04-16 15:20:36 -07006435	/* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006436 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07006437 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006438 if (ret) {
6439 rollback_registered(dev);
6440 dev->reg_state = NETREG_UNREGISTERED;
6441 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006442 /*
6443 * Prevent userspace races by waiting until the network
6444 * device is fully setup before sending notifications.
6445 */
Patrick McHardya2835762010-02-26 06:34:51 +00006446 if (!dev->rtnl_link_ops ||
6447 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006448 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006449
6450out:
6451 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006452
6453err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006454 if (dev->netdev_ops->ndo_uninit)
6455 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006456 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006457}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006458EXPORT_SYMBOL(register_netdevice);
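
/*
 * Usage sketch (hypothetical code, not from this file): register_netdevice()
 * is for callers that are already running under the rtnl semaphore, for
 * example control-path code that creates a device while reacting to another
 * netdev event.  "example_register_locked" is an illustrative name.
 */
static int example_register_locked(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();			/* documented precondition above */

	err = register_netdevice(dev);
	if (err)
		netdev_err(dev, "register_netdevice failed: %d\n", err);
	return err;
}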
Linus Torvalds1da177e2005-04-16 15:20:36 -07006459
6460/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006461 * init_dummy_netdev - init a dummy network device for NAPI
6462 * @dev: device to init
6463 *
 6464	 * This takes a network device structure and initializes the minimum
 6465	 * number of fields so it can be used to schedule NAPI polls without
 6466	 * registering a full-blown interface. This is to be used by drivers
6467 * that need to tie several hardware interfaces to a single NAPI
6468 * poll scheduler due to HW limitations.
6469 */
6470int init_dummy_netdev(struct net_device *dev)
6471{
6472 /* Clear everything. Note we don't initialize spinlocks
 6473	 * as they aren't supposed to be taken by any of the
6474 * NAPI code and this dummy netdev is supposed to be
6475 * only ever used for NAPI polls
6476 */
6477 memset(dev, 0, sizeof(struct net_device));
6478
6479 /* make sure we BUG if trying to hit standard
6480 * register/unregister code path
6481 */
6482 dev->reg_state = NETREG_DUMMY;
6483
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006484 /* NAPI wants this */
6485 INIT_LIST_HEAD(&dev->napi_list);
6486
6487 /* a dummy interface is started by default */
6488 set_bit(__LINK_STATE_PRESENT, &dev->state);
6489 set_bit(__LINK_STATE_START, &dev->state);
6490
Eric Dumazet29b44332010-10-11 10:22:12 +00006491	/* Note : We don't allocate pcpu_refcnt for dummy devices,
 6492	 * because users of this 'device' don't need to change
6493 * its refcount.
6494 */
6495
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006496 return 0;
6497}
6498EXPORT_SYMBOL_GPL(init_dummy_netdev);
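
/*
 * Usage sketch (hypothetical driver, illustrative names only): a device with
 * several hardware ports but one interrupt source can embed a dummy netdev
 * purely as the anchor for a shared NAPI context, exactly the case the
 * comment above describes.
 */
struct example_adapter {
	struct net_device napi_dev;	/* never registered, NAPI anchor only */
	struct napi_struct napi;
};

static int example_shared_poll(struct napi_struct *napi, int budget)
{
	/* service the shared interrupt source here, up to @budget packets */
	napi_complete(napi);
	return 0;
}

static void example_adapter_setup_napi(struct example_adapter *ad)
{
	init_dummy_netdev(&ad->napi_dev);
	netif_napi_add(&ad->napi_dev, &ad->napi, example_shared_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&ad->napi);
}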
6499
6500
6501/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006502 * register_netdev - register a network device
6503 * @dev: device to register
6504 *
6505 * Take a completed network device structure and add it to the kernel
6506 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6507 * chain. 0 is returned on success. A negative errno code is returned
6508 * on a failure to set up the device, or if the name is a duplicate.
6509 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07006510 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07006511 * and expands the device name if you passed a format string to
6512 * alloc_netdev.
6513 */
6514int register_netdev(struct net_device *dev)
6515{
6516 int err;
6517
6518 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006519 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006520 rtnl_unlock();
6521 return err;
6522}
6523EXPORT_SYMBOL(register_netdev);
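
/*
 * Usage sketch of a typical probe path (hypothetical driver, all "example_"
 * names and the trivial ops are illustrative): allocate an Ethernet device,
 * wire up net_device_ops, then let register_netdev() take the rtnl and
 * expand the default "eth%d" name.
 */
struct example_priv {
	int link_up;
};

static int example_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int example_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb_any(skb);		/* sketch only: drop the frame */
	return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_open	= example_open,
	.ndo_stop	= example_stop,
	.ndo_start_xmit	= example_xmit,
};

static int example_probe(struct device *parent)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct example_priv));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, parent);
	dev->netdev_ops = &example_netdev_ops;
	eth_hw_addr_random(dev);

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);	/* never registered, free directly */
	return err;
}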
6524
Eric Dumazet29b44332010-10-11 10:22:12 +00006525int netdev_refcnt_read(const struct net_device *dev)
6526{
6527 int i, refcnt = 0;
6528
6529 for_each_possible_cpu(i)
6530 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6531 return refcnt;
6532}
6533EXPORT_SYMBOL(netdev_refcnt_read);
6534
Ben Hutchings2c530402012-07-10 10:55:09 +00006535/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006536 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00006537 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006538 *
6539 * This is called when unregistering network devices.
6540 *
6541 * Any protocol or device that holds a reference should register
6542 * for netdevice notification, and cleanup and put back the
6543 * reference if they receive an UNREGISTER event.
6544 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006545 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006546 */
6547static void netdev_wait_allrefs(struct net_device *dev)
6548{
6549 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00006550 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006551
Eric Dumazete014deb2009-11-17 05:59:21 +00006552 linkwatch_forget_dev(dev);
6553
Linus Torvalds1da177e2005-04-16 15:20:36 -07006554 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00006555 refcnt = netdev_refcnt_read(dev);
6556
6557 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006558 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006559 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006560
6561 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006562 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006563
Eric Dumazet748e2d92012-08-22 21:50:59 +00006564 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006565 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00006566 rtnl_lock();
6567
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006568 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006569 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6570 &dev->state)) {
6571 /* We must not have linkwatch events
6572 * pending on unregister. If this
6573 * happens, we simply run the queue
6574 * unscheduled, resulting in a noop
6575 * for this device.
6576 */
6577 linkwatch_run_queue();
6578 }
6579
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006580 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006581
6582 rebroadcast_time = jiffies;
6583 }
6584
6585 msleep(250);
6586
Eric Dumazet29b44332010-10-11 10:22:12 +00006587 refcnt = netdev_refcnt_read(dev);
6588
Linus Torvalds1da177e2005-04-16 15:20:36 -07006589 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006590 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6591 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006592 warning_time = jiffies;
6593 }
6594 }
6595}
6596
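/*
 * Usage sketch (hypothetical subsystem code): the contract spelled out above
 * netdev_wait_allrefs() -- whoever pins a device with dev_hold() must watch
 * for NETDEV_UNREGISTER and drop the reference -- typically looks like this.
 * The single cached pointer is an illustrative stand-in for real per-device
 * state; netdev notifiers run under rtnl, which serializes it here.
 */
static struct net_device *example_cached_dev;	/* pinned with dev_hold() elsewhere */

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UNREGISTER && dev == example_cached_dev) {
		example_cached_dev = NULL;
		dev_put(dev);		/* balance the earlier dev_hold() */
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};
/* registered once via register_netdevice_notifier(&example_netdev_notifier) */
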
6597/* The sequence is:
6598 *
6599 * rtnl_lock();
6600 * ...
6601 * register_netdevice(x1);
6602 * register_netdevice(x2);
6603 * ...
6604 * unregister_netdevice(y1);
6605 * unregister_netdevice(y2);
6606 * ...
6607 * rtnl_unlock();
6608 * free_netdev(y1);
6609 * free_netdev(y2);
6610 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006611 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006612 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006613 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006614 * without deadlocking with linkwatch via keventd.
6615 * 2) Since we run with the RTNL semaphore not held, we can sleep
6616 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006617 *
6618 * We must not return until all unregister events added during
6619 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006620 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006621void netdev_run_todo(void)
6622{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006623 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006624
Linus Torvalds1da177e2005-04-16 15:20:36 -07006625 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006626 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07006627
6628 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006629
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006630
6631 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00006632 if (!list_empty(&list))
6633 rcu_barrier();
6634
Linus Torvalds1da177e2005-04-16 15:20:36 -07006635 while (!list_empty(&list)) {
6636 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00006637 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006638 list_del(&dev->todo_list);
6639
Eric Dumazet748e2d92012-08-22 21:50:59 +00006640 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006641 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00006642 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006643
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006644 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006645 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006646 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006647 dump_stack();
6648 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006649 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006650
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006651 dev->reg_state = NETREG_UNREGISTERED;
6652
Changli Gao152102c2010-03-30 20:16:22 +00006653 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07006654
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006655 netdev_wait_allrefs(dev);
6656
6657 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00006658 BUG_ON(netdev_refcnt_read(dev));
Salam Noureddine7866a622015-01-27 11:35:48 -08006659 BUG_ON(!list_empty(&dev->ptype_all));
6660 BUG_ON(!list_empty(&dev->ptype_specific));
Eric Dumazet33d480c2011-08-11 19:30:52 +00006661 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6662 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07006663 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006664
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006665 if (dev->destructor)
6666 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006667
Eric W. Biederman50624c92013-09-23 21:19:49 -07006668 /* Report a network device has been unregistered */
6669 rtnl_lock();
6670 dev_net(dev)->dev_unreg_count--;
6671 __rtnl_unlock();
6672 wake_up(&netdev_unregistering_wq);
6673
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006674 /* Free network device */
6675 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006676 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006677}
6678
Ben Hutchings3cfde792010-07-09 09:11:52 +00006679/* Convert net_device_stats to rtnl_link_stats64. They have the same
6680 * fields in the same order, with only the type differing.
6681 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006682void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6683 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00006684{
6685#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006686 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6687 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00006688#else
6689 size_t i, n = sizeof(*stats64) / sizeof(u64);
6690 const unsigned long *src = (const unsigned long *)netdev_stats;
6691 u64 *dst = (u64 *)stats64;
6692
6693 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6694 sizeof(*stats64) / sizeof(u64));
6695 for (i = 0; i < n; i++)
6696 dst[i] = src[i];
6697#endif
6698}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006699EXPORT_SYMBOL(netdev_stats_to_stats64);
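
/*
 * Usage sketch (hypothetical driver): a driver that only maintains the legacy
 * net_device_stats counters can still report 64-bit stats by converting in
 * its ndo_get_stats64 hook.  The prototype below is assumed to match this
 * kernel generation's net_device_ops; "example_" names are illustrative.
 */
static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
{
	netdev_stats_to_stats64(storage, &dev->stats);
	return storage;
}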
Ben Hutchings3cfde792010-07-09 09:11:52 +00006700
Eric Dumazetd83345a2009-11-16 03:36:51 +00006701/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006702 * dev_get_stats - get network device statistics
6703 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07006704 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006705 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00006706 * Get network statistics from device. Return @storage.
6707 * The device driver may provide its own method by setting
 6708	 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
6709 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006710 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00006711struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6712 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00006713{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006714 const struct net_device_ops *ops = dev->netdev_ops;
6715
Eric Dumazet28172732010-07-07 14:58:56 -07006716 if (ops->ndo_get_stats64) {
6717 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006718 ops->ndo_get_stats64(dev, storage);
6719 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00006720 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006721 } else {
6722 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07006723 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006724 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet015f0682014-03-27 08:45:56 -07006725 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07006726 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07006727}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006728EXPORT_SYMBOL(dev_get_stats);
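
/*
 * Usage sketch: dumping a device's 64-bit counters.  dev_get_stats() fills
 * the caller-provided buffer, so it can live on the stack; the caller is
 * assumed to hold a reference on @dev.  "example_log_stats" is illustrative.
 */
static void example_log_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	netdev_info(dev, "rx %llu pkts, tx %llu pkts, rx dropped %llu\n",
		    stats.rx_packets, stats.tx_packets, stats.rx_dropped);
}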
Rusty Russellc45d2862007-03-28 14:29:08 -07006729
Eric Dumazet24824a02010-10-02 06:11:55 +00006730struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07006731{
Eric Dumazet24824a02010-10-02 06:11:55 +00006732 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07006733
Eric Dumazet24824a02010-10-02 06:11:55 +00006734#ifdef CONFIG_NET_CLS_ACT
6735 if (queue)
6736 return queue;
6737 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6738 if (!queue)
6739 return NULL;
6740 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08006741 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
Eric Dumazet24824a02010-10-02 06:11:55 +00006742 queue->qdisc_sleeping = &noop_qdisc;
6743 rcu_assign_pointer(dev->ingress_queue, queue);
6744#endif
6745 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07006746}
6747
Eric Dumazet2c60db02012-09-16 09:17:26 +00006748static const struct ethtool_ops default_ethtool_ops;
6749
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00006750void netdev_set_default_ethtool_ops(struct net_device *dev,
6751 const struct ethtool_ops *ops)
6752{
6753 if (dev->ethtool_ops == &default_ethtool_ops)
6754 dev->ethtool_ops = ops;
6755}
6756EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6757
Eric Dumazet74d332c2013-10-30 13:10:44 -07006758void netdev_freemem(struct net_device *dev)
6759{
6760 char *addr = (char *)dev - dev->padded;
6761
WANG Cong4cb28972014-06-02 15:55:22 -07006762 kvfree(addr);
Eric Dumazet74d332c2013-10-30 13:10:44 -07006763}
6764
Linus Torvalds1da177e2005-04-16 15:20:36 -07006765/**
Tom Herbert36909ea2011-01-09 19:36:31 +00006766 * alloc_netdev_mqs - allocate network device
Tom Gundersenc835a672014-07-14 16:37:24 +02006767 * @sizeof_priv: size of private data to allocate space for
6768 * @name: device name format string
6769 * @name_assign_type: origin of device name
6770 * @setup: callback to initialize device
6771 * @txqs: the number of TX subqueues to allocate
6772 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07006773 *
6774 * Allocates a struct net_device with private data area for driver use
Li Zhong90e51ad2013-11-22 15:04:46 +08006775 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00006776 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006777 */
Tom Herbert36909ea2011-01-09 19:36:31 +00006778struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02006779 unsigned char name_assign_type,
Tom Herbert36909ea2011-01-09 19:36:31 +00006780 void (*setup)(struct net_device *),
6781 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006782{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006783 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07006784 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006785 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006786
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07006787 BUG_ON(strlen(name) >= sizeof(dev->name));
6788
Tom Herbert36909ea2011-01-09 19:36:31 +00006789 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006790 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00006791 return NULL;
6792 }
6793
Michael Daltona953be52014-01-16 22:23:28 -08006794#ifdef CONFIG_SYSFS
Tom Herbert36909ea2011-01-09 19:36:31 +00006795 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006796 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00006797 return NULL;
6798 }
6799#endif
6800
David S. Millerfd2ea0a2008-07-17 01:56:23 -07006801 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006802 if (sizeof_priv) {
6803 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006804 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006805 alloc_size += sizeof_priv;
6806 }
6807 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006808 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006809
Eric Dumazet74d332c2013-10-30 13:10:44 -07006810 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6811 if (!p)
6812 p = vzalloc(alloc_size);
Joe Perches62b59422013-02-04 16:48:16 +00006813 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006814 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006815
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006816 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006817 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006818
Eric Dumazet29b44332010-10-11 10:22:12 +00006819 dev->pcpu_refcnt = alloc_percpu(int);
6820 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07006821 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006822
Linus Torvalds1da177e2005-04-16 15:20:36 -07006823 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00006824 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006825
Jiri Pirko22bedad32010-04-01 21:22:57 +00006826 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006827 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00006828
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006829 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006830
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07006831 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00006832 dev->gso_max_segs = GSO_MAX_SEGS;
Eric Dumazetfcbeb972014-10-05 10:11:27 -07006833 dev->gso_min_segs = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006834
Herbert Xud565b0a2008-12-15 23:38:52 -08006835 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006836 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006837 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00006838 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006839 INIT_LIST_HEAD(&dev->adj_list.upper);
6840 INIT_LIST_HEAD(&dev->adj_list.lower);
6841 INIT_LIST_HEAD(&dev->all_adj_list.upper);
6842 INIT_LIST_HEAD(&dev->all_adj_list.lower);
Salam Noureddine7866a622015-01-27 11:35:48 -08006843 INIT_LIST_HEAD(&dev->ptype_all);
6844 INIT_LIST_HEAD(&dev->ptype_specific);
Eric Dumazet02875872014-10-05 18:38:35 -07006845 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006846 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006847
6848 dev->num_tx_queues = txqs;
6849 dev->real_num_tx_queues = txqs;
6850 if (netif_alloc_netdev_queues(dev))
6851 goto free_all;
6852
Michael Daltona953be52014-01-16 22:23:28 -08006853#ifdef CONFIG_SYSFS
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006854 dev->num_rx_queues = rxqs;
6855 dev->real_num_rx_queues = rxqs;
6856 if (netif_alloc_rx_queues(dev))
6857 goto free_all;
6858#endif
6859
Linus Torvalds1da177e2005-04-16 15:20:36 -07006860 strcpy(dev->name, name);
Tom Gundersenc835a672014-07-14 16:37:24 +02006861 dev->name_assign_type = name_assign_type;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006862 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00006863 if (!dev->ethtool_ops)
6864 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006865 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006866
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006867free_all:
6868 free_netdev(dev);
6869 return NULL;
6870
Eric Dumazet29b44332010-10-11 10:22:12 +00006871free_pcpu:
6872 free_percpu(dev->pcpu_refcnt);
Eric Dumazet74d332c2013-10-30 13:10:44 -07006873free_dev:
6874 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006875 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006876}
Tom Herbert36909ea2011-01-09 19:36:31 +00006877EXPORT_SYMBOL(alloc_netdev_mqs);
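
/*
 * Usage sketch (hypothetical driver): carve out a device with eight TX and
 * eight RX queues plus a private area.  NET_NAME_ENUM records that the
 * "ex%d" name is enumerated by the kernel; "example_" names and the queue
 * counts are illustrative.  free_netdev() is the counterpart on error paths.
 */
struct example_mq_priv {
	u32 flags;
};

static void example_mq_setup(struct net_device *dev)
{
	ether_setup(dev);		/* sane Ethernet defaults */
}

static struct net_device *example_mq_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct example_mq_priv), "ex%d",
				NET_NAME_ENUM, example_mq_setup, 8, 8);
}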
Linus Torvalds1da177e2005-04-16 15:20:36 -07006878
6879/**
6880 * free_netdev - free network device
6881 * @dev: device
6882 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006883 * This function does the last stage of destroying an allocated device
6884 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006885 * If this is the last reference then it will be freed.
6886 */
6887void free_netdev(struct net_device *dev)
6888{
Herbert Xud565b0a2008-12-15 23:38:52 -08006889 struct napi_struct *p, *n;
6890
Eric Dumazet60877a32013-06-20 01:15:51 -07006891 netif_free_tx_queues(dev);
Michael Daltona953be52014-01-16 22:23:28 -08006892#ifdef CONFIG_SYSFS
Pankaj Gupta10595902015-01-12 11:41:28 +05306893 kvfree(dev->_rx);
Tom Herbertfe822242010-11-09 10:47:38 +00006894#endif
David S. Millere8a04642008-07-17 00:34:19 -07006895
Eric Dumazet33d480c2011-08-11 19:30:52 +00006896 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00006897
Jiri Pirkof001fde2009-05-05 02:48:28 +00006898 /* Flush device addresses */
6899 dev_addr_flush(dev);
6900
Herbert Xud565b0a2008-12-15 23:38:52 -08006901 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6902 netif_napi_del(p);
6903
Eric Dumazet29b44332010-10-11 10:22:12 +00006904 free_percpu(dev->pcpu_refcnt);
6905 dev->pcpu_refcnt = NULL;
6906
Stephen Hemminger3041a062006-05-26 13:25:24 -07006907 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006908 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07006909 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006910 return;
6911 }
6912
6913 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6914 dev->reg_state = NETREG_RELEASED;
6915
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07006916 /* will free via device release */
6917 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006918}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006919EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006920
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006921/**
6922 * synchronize_net - Synchronize with packet receive processing
6923 *
6924 * Wait for packets currently being received to be done.
6925 * Does not block later packets from starting.
6926 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006927void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006928{
6929 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00006930 if (rtnl_is_locked())
6931 synchronize_rcu_expedited();
6932 else
6933 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006934}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006935EXPORT_SYMBOL(synchronize_net);
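
/*
 * Usage sketch: the publish/retire pattern this helper supports.  Receive
 * paths would dereference "example_hook_ptr" under rcu_read_lock(); the
 * writer clears the pointer and uses synchronize_net() as the grace period
 * before freeing.  All names are illustrative; writers are assumed to be
 * serialized by rtnl.
 */
struct example_hook {
	void (*fn)(struct sk_buff *skb);
};

static struct example_hook __rcu *example_hook_ptr;

static void example_retire_hook(void)
{
	struct example_hook *old;

	ASSERT_RTNL();
	old = rtnl_dereference(example_hook_ptr);
	RCU_INIT_POINTER(example_hook_ptr, NULL);
	synchronize_net();		/* wait out in-flight readers */
	kfree(old);
}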
Linus Torvalds1da177e2005-04-16 15:20:36 -07006936
6937/**
Eric Dumazet44a08732009-10-27 07:03:04 +00006938 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07006939 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00006940 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08006941 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07006942 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006943 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00006944 *	If head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006945 *
6946 * Callers must hold the rtnl semaphore. You may want
6947 * unregister_netdev() instead of this.
6948 */
6949
Eric Dumazet44a08732009-10-27 07:03:04 +00006950void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006951{
Herbert Xua6620712007-12-12 19:21:56 -08006952 ASSERT_RTNL();
6953
Eric Dumazet44a08732009-10-27 07:03:04 +00006954 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006955 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00006956 } else {
6957 rollback_registered(dev);
6958 /* Finish processing unregister after unlock */
6959 net_set_todo(dev);
6960 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006961}
Eric Dumazet44a08732009-10-27 07:03:04 +00006962EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006963
6964/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006965 * unregister_netdevice_many - unregister many devices
6966 * @head: list of devices
Eric Dumazet87757a92014-06-06 06:44:03 -07006967 *
 6968 *  Note: As most callers use a stack-allocated list_head,
 6969 *  we force a list_del() to make sure the stack won't be corrupted later.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006970 */
6971void unregister_netdevice_many(struct list_head *head)
6972{
6973 struct net_device *dev;
6974
6975 if (!list_empty(head)) {
6976 rollback_registered_many(head);
6977 list_for_each_entry(dev, head, unreg_list)
6978 net_set_todo(dev);
Eric Dumazet87757a92014-06-06 06:44:03 -07006979 list_del(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006980 }
6981}
Eric Dumazet63c80992009-10-27 07:06:49 +00006982EXPORT_SYMBOL(unregister_netdevice_many);
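
/*
 * Usage sketch: tearing down a group of devices with a single rtnl
 * round-trip, the way rtnl_link_ops->dellink() style code does.  The list
 * head lives on the stack, which is exactly why unregister_netdevice_many()
 * performs the final list_del().  Each device's destructor (commonly
 * free_netdev) is assumed to handle the eventual free; names are
 * illustrative.
 */
static void example_destroy_group(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}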
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006983
6984/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006985 * unregister_netdev - remove device from the kernel
6986 * @dev: device
6987 *
6988 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006989 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006990 *
6991 * This is just a wrapper for unregister_netdevice that takes
6992 * the rtnl semaphore. In general you want to use this and not
6993 * unregister_netdevice.
6994 */
6995void unregister_netdev(struct net_device *dev)
6996{
6997 rtnl_lock();
6998 unregister_netdevice(dev);
6999 rtnl_unlock();
7000}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007001EXPORT_SYMBOL(unregister_netdev);
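
/*
 * Usage sketch of the matching remove path for the probe sketch earlier
 * (hypothetical driver): unregister_netdev() takes the rtnl itself, and
 * free_netdev() must only run once unregistration has completed.
 */
static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* waits for the device to quiesce */
	free_netdev(dev);
}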
7002
Eric W. Biedermance286d32007-09-12 13:53:49 +02007003/**
 7004 *	dev_change_net_namespace - move device to a different network namespace
7005 * @dev: device
7006 * @net: network namespace
7007 * @pat: If not NULL name pattern to try if the current device name
7008 * is already taken in the destination network namespace.
7009 *
7010 * This function shuts down a device interface and moves it
7011 * to a new network namespace. On success 0 is returned, on
 7012 *	a failure a negative errno code is returned.
7013 *
7014 * Callers must hold the rtnl semaphore.
7015 */
7016
7017int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7018{
Eric W. Biedermance286d32007-09-12 13:53:49 +02007019 int err;
7020
7021 ASSERT_RTNL();
7022
7023 /* Don't allow namespace local devices to be moved. */
7024 err = -EINVAL;
7025 if (dev->features & NETIF_F_NETNS_LOCAL)
7026 goto out;
7027
 7028	/* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02007029 if (dev->reg_state != NETREG_REGISTERED)
7030 goto out;
7031
 7032	/* Get out if there is nothing to do */
7033 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09007034 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02007035 goto out;
7036
7037 /* Pick the destination device name, and ensure
7038 * we can use it in the destination network namespace.
7039 */
7040 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00007041 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007042 /* We get here if we can't use the current device name */
7043 if (!pat)
7044 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00007045 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007046 goto out;
7047 }
7048
7049 /*
 7050	 * And now a mini version of register_netdevice and unregister_netdevice.
7051 */
7052
7053 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07007054 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007055
7056 /* And unlink it from device chain */
7057 err = -ENODEV;
7058 unlist_netdevice(dev);
7059
7060 synchronize_net();
7061
7062 /* Shutdown queueing discipline. */
7063 dev_shutdown(dev);
7064
 7065	/* Notify protocols that we are about to destroy
 7066	   this device. They should clean up all their state.
David Lamparter3b27e102010-09-17 03:22:19 +00007067
7068 Note that dev->reg_state stays at NETREG_REGISTERED.
7069 This is wanted because this way 8021q and macvlan know
7070 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02007071 */
7072 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00007073 rcu_barrier();
7074 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007075 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007076
7077 /*
7078 * Flush the unicast and multicast chains
7079 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00007080 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00007081 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007082
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007083 /* Send a netdev-removed uevent to the old namespace */
7084 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007085 netdev_adjacent_del_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007086
Eric W. Biedermance286d32007-09-12 13:53:49 +02007087 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09007088 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007089
Eric W. Biedermance286d32007-09-12 13:53:49 +02007090 /* If there is an ifindex conflict assign a new one */
Nicolas Dichtel7a66bbc2015-04-02 17:07:09 +02007091 if (__dev_get_by_index(net, dev->ifindex))
Eric W. Biedermance286d32007-09-12 13:53:49 +02007092 dev->ifindex = dev_new_index(net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007093
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007094 /* Send a netdev-add uevent to the new namespace */
7095 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007096 netdev_adjacent_add_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +00007097
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007098 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07007099 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007100 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007101
7102 /* Add the device back in the hashes */
7103 list_netdevice(dev);
7104
 7105	/* Notify protocols that a new device appeared. */
7106 call_netdevice_notifiers(NETDEV_REGISTER, dev);
7107
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007108 /*
7109 * Prevent userspace races by waiting until the network
7110 * device is fully setup before sending notifications.
7111 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07007112 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00007113
Eric W. Biedermance286d32007-09-12 13:53:49 +02007114 synchronize_net();
7115 err = 0;
7116out:
7117 return err;
7118}
Johannes Berg463d0182009-07-14 00:33:35 +02007119EXPORT_SYMBOL_GPL(dev_change_net_namespace);
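
/*
 * Usage sketch: moving a device into a namespace identified by a file
 * descriptor (for instance one received from user space).  The "dev%d"
 * pattern is only used if the current name is already taken in the target
 * namespace.  "example_move_to_netns" and the fd parameter are illustrative.
 */
static int example_move_to_netns(struct net_device *dev, int netns_fd)
{
	struct net *net;
	int err;

	net = get_net_ns_by_fd(netns_fd);
	if (IS_ERR(net))
		return PTR_ERR(net);

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "dev%d");
	rtnl_unlock();

	put_net(net);
	return err;
}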
Eric W. Biedermance286d32007-09-12 13:53:49 +02007120
Linus Torvalds1da177e2005-04-16 15:20:36 -07007121static int dev_cpu_callback(struct notifier_block *nfb,
7122 unsigned long action,
7123 void *ocpu)
7124{
7125 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007126 struct sk_buff *skb;
7127 unsigned int cpu, oldcpu = (unsigned long)ocpu;
7128 struct softnet_data *sd, *oldsd;
7129
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007130 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007131 return NOTIFY_OK;
7132
7133 local_irq_disable();
7134 cpu = smp_processor_id();
7135 sd = &per_cpu(softnet_data, cpu);
7136 oldsd = &per_cpu(softnet_data, oldcpu);
7137
7138 /* Find end of our completion_queue. */
7139 list_skb = &sd->completion_queue;
7140 while (*list_skb)
7141 list_skb = &(*list_skb)->next;
7142 /* Append completion queue from offline CPU. */
7143 *list_skb = oldsd->completion_queue;
7144 oldsd->completion_queue = NULL;
7145
Linus Torvalds1da177e2005-04-16 15:20:36 -07007146 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00007147 if (oldsd->output_queue) {
7148 *sd->output_queue_tailp = oldsd->output_queue;
7149 sd->output_queue_tailp = oldsd->output_queue_tailp;
7150 oldsd->output_queue = NULL;
7151 oldsd->output_queue_tailp = &oldsd->output_queue;
7152 }
Eric Dumazetac64da02015-01-15 17:04:22 -08007153 /* Append NAPI poll list from offline CPU, with one exception :
7154 * process_backlog() must be called by cpu owning percpu backlog.
7155 * We properly handle process_queue & input_pkt_queue later.
7156 */
7157 while (!list_empty(&oldsd->poll_list)) {
7158 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7159 struct napi_struct,
7160 poll_list);
7161
7162 list_del_init(&napi->poll_list);
7163 if (napi->poll == process_backlog)
7164 napi->state = 0;
7165 else
7166 ____napi_schedule(sd, napi);
Heiko Carstens264524d2011-06-06 20:50:03 +00007167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007168
7169 raise_softirq_irqoff(NET_TX_SOFTIRQ);
7170 local_irq_enable();
7171
7172 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00007173 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08007174 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00007175 input_queue_head_incr(oldsd);
7176 }
Eric Dumazetac64da02015-01-15 17:04:22 -08007177 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -08007178 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00007179 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07007180 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007181
7182 return NOTIFY_OK;
7183}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007184
7185
Herbert Xu7f353bf2007-08-10 15:47:58 -07007186/**
Herbert Xub63365a2008-10-23 01:11:29 -07007187 * netdev_increment_features - increment feature set by one
7188 * @all: current feature set
7189 * @one: new feature set
7190 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07007191 *
7192 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07007193 * @one to the master device with current feature set @all. Will not
7194 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07007195 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007196netdev_features_t netdev_increment_features(netdev_features_t all,
7197 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07007198{
Michał Mirosław1742f182011-04-22 06:31:16 +00007199 if (mask & NETIF_F_GEN_CSUM)
7200 mask |= NETIF_F_ALL_CSUM;
7201 mask |= NETIF_F_VLAN_CHALLENGED;
7202
7203 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
7204 all &= one | ~NETIF_F_ALL_FOR_ALL;
7205
Michał Mirosław1742f182011-04-22 06:31:16 +00007206 /* If one device supports hw checksumming, set for all. */
7207 if (all & NETIF_F_GEN_CSUM)
7208 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07007209
7210 return all;
7211}
Herbert Xub63365a2008-10-23 01:11:29 -07007212EXPORT_SYMBOL(netdev_increment_features);
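
/*
 * Usage sketch (in the style of bonding/team, illustrative names): fold the
 * feature sets of several lower devices into one, restricted to a mask of
 * features the aggregating device is willing to offer.
 */
static netdev_features_t example_compute_features(struct net_device *lowers[],
						  int n,
						  netdev_features_t mask)
{
	/* start from the "set for all" identity within the mask */
	netdev_features_t features = mask & NETIF_F_ALL_FOR_ALL;
	int i;

	for (i = 0; i < n; i++)
		features = netdev_increment_features(features,
						     lowers[i]->features, mask);
	return features;
}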
Herbert Xu7f353bf2007-08-10 15:47:58 -07007213
Baruch Siach430f03c2013-06-02 20:43:55 +00007214static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007215{
7216 int i;
7217 struct hlist_head *hash;
7218
7219 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7220 if (hash != NULL)
7221 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7222 INIT_HLIST_HEAD(&hash[i]);
7223
7224 return hash;
7225}
7226
Eric W. Biederman881d9662007-09-17 11:56:21 -07007227/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07007228static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007229{
Rustad, Mark D734b6542012-07-18 09:06:07 +00007230 if (net != &init_net)
7231 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07007232
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007233 net->dev_name_head = netdev_create_hash();
7234 if (net->dev_name_head == NULL)
7235 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007236
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007237 net->dev_index_head = netdev_create_hash();
7238 if (net->dev_index_head == NULL)
7239 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007240
7241 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007242
7243err_idx:
7244 kfree(net->dev_name_head);
7245err_name:
7246 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007247}
7248
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007249/**
7250 * netdev_drivername - network driver for the device
7251 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007252 *
7253 * Determine network driver for device.
7254 */
David S. Miller3019de12011-06-06 16:41:33 -07007255const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07007256{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07007257 const struct device_driver *driver;
7258 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07007259 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07007260
7261 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007262 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07007263 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007264
7265 driver = parent->driver;
7266 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07007267 return driver->name;
7268 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007269}
7270
Joe Perches6ea754e2014-09-22 11:10:50 -07007271static void __netdev_printk(const char *level, const struct net_device *dev,
7272 struct va_format *vaf)
Joe Perches256df2f2010-06-27 01:02:35 +00007273{
Joe Perchesb004ff42012-09-12 20:12:19 -07007274 if (dev && dev->dev.parent) {
Joe Perches6ea754e2014-09-22 11:10:50 -07007275 dev_printk_emit(level[1] - '0',
7276 dev->dev.parent,
7277 "%s %s %s%s: %pV",
7278 dev_driver_string(dev->dev.parent),
7279 dev_name(dev->dev.parent),
7280 netdev_name(dev), netdev_reg_state(dev),
7281 vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007282 } else if (dev) {
Joe Perches6ea754e2014-09-22 11:10:50 -07007283 printk("%s%s%s: %pV",
7284 level, netdev_name(dev), netdev_reg_state(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007285 } else {
Joe Perches6ea754e2014-09-22 11:10:50 -07007286 printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007287 }
Joe Perches256df2f2010-06-27 01:02:35 +00007288}
7289
Joe Perches6ea754e2014-09-22 11:10:50 -07007290void netdev_printk(const char *level, const struct net_device *dev,
7291 const char *format, ...)
Joe Perches256df2f2010-06-27 01:02:35 +00007292{
7293 struct va_format vaf;
7294 va_list args;
Joe Perches256df2f2010-06-27 01:02:35 +00007295
7296 va_start(args, format);
7297
7298 vaf.fmt = format;
7299 vaf.va = &args;
7300
Joe Perches6ea754e2014-09-22 11:10:50 -07007301 __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007302
Joe Perches256df2f2010-06-27 01:02:35 +00007303 va_end(args);
Joe Perches256df2f2010-06-27 01:02:35 +00007304}
7305EXPORT_SYMBOL(netdev_printk);
7306
7307#define define_netdev_printk_level(func, level) \
Joe Perches6ea754e2014-09-22 11:10:50 -07007308void func(const struct net_device *dev, const char *fmt, ...) \
Joe Perches256df2f2010-06-27 01:02:35 +00007309{ \
Joe Perches256df2f2010-06-27 01:02:35 +00007310 struct va_format vaf; \
7311 va_list args; \
7312 \
7313 va_start(args, fmt); \
7314 \
7315 vaf.fmt = fmt; \
7316 vaf.va = &args; \
7317 \
Joe Perches6ea754e2014-09-22 11:10:50 -07007318 __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07007319 \
Joe Perches256df2f2010-06-27 01:02:35 +00007320 va_end(args); \
Joe Perches256df2f2010-06-27 01:02:35 +00007321} \
7322EXPORT_SYMBOL(func);
7323
7324define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7325define_netdev_printk_level(netdev_alert, KERN_ALERT);
7326define_netdev_printk_level(netdev_crit, KERN_CRIT);
7327define_netdev_printk_level(netdev_err, KERN_ERR);
7328define_netdev_printk_level(netdev_warn, KERN_WARNING);
7329define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7330define_netdev_printk_level(netdev_info, KERN_INFO);
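
/*
 * Usage sketch: these wrappers prefix messages with the driver and device
 * name, so driver code does not have to build its own prefixes.
 * "example_report_link" is an illustrative helper.
 */
static void example_report_link(struct net_device *dev, bool up, int err)
{
	if (err)
		netdev_err(dev, "link handling failed: %d\n", err);
	else
		netdev_info(dev, "link is %s\n", up ? "up" : "down");
}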
7331
Pavel Emelyanov46650792007-10-08 20:38:39 -07007332static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007333{
7334 kfree(net->dev_name_head);
7335 kfree(net->dev_index_head);
7336}
7337
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007338static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07007339 .init = netdev_init,
7340 .exit = netdev_exit,
7341};
7342
Pavel Emelyanov46650792007-10-08 20:38:39 -07007343static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007344{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007345 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02007346 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007347 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02007348 * initial network namespace
7349 */
7350 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007351 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007352 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007353 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02007354
 7355		/* Ignore unmovable devices (i.e. loopback) */
7356 if (dev->features & NETIF_F_NETNS_LOCAL)
7357 continue;
7358
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007359 /* Leave virtual devices for the generic cleanup */
7360 if (dev->rtnl_link_ops)
7361 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08007362
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007363 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007364 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7365 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007366 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007367 pr_emerg("%s: failed to move %s to init_net: %d\n",
7368 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007369 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02007370 }
7371 }
7372 rtnl_unlock();
7373}
7374
Eric W. Biederman50624c92013-09-23 21:19:49 -07007375static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7376{
7377 /* Return with the rtnl_lock held when there are no network
7378 * devices unregistering in any network namespace in net_list.
7379 */
7380 struct net *net;
7381 bool unregistering;
Peter Zijlstraff960a72014-10-29 17:04:56 +01007382 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007383
Peter Zijlstraff960a72014-10-29 17:04:56 +01007384 add_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007385 for (;;) {
Eric W. Biederman50624c92013-09-23 21:19:49 -07007386 unregistering = false;
7387 rtnl_lock();
7388 list_for_each_entry(net, net_list, exit_list) {
7389 if (net->dev_unreg_count > 0) {
7390 unregistering = true;
7391 break;
7392 }
7393 }
7394 if (!unregistering)
7395 break;
7396 __rtnl_unlock();
Peter Zijlstraff960a72014-10-29 17:04:56 +01007397
7398 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007399 }
Peter Zijlstraff960a72014-10-29 17:04:56 +01007400 remove_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -07007401}
7402
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007403static void __net_exit default_device_exit_batch(struct list_head *net_list)
7404{
 7405	/* At exit all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04007406 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007407 * Do this across as many network namespaces as possible to
7408 * improve batching efficiency.
7409 */
7410 struct net_device *dev;
7411 struct net *net;
7412 LIST_HEAD(dev_kill_list);
7413
Eric W. Biederman50624c92013-09-23 21:19:49 -07007414 /* To prevent network device cleanup code from dereferencing
 7415	 * loopback devices or network devices that have been freed,
 7416	 * wait here for all pending unregistrations to complete
 7417	 * before unregistering the loopback device and allowing the
 7418	 * network namespace to be freed.
7419 *
 7420	 * The netdev todo list containing all network device
7421 * unregistrations that happen in default_device_exit_batch
7422 * will run in the rtnl_unlock() at the end of
7423 * default_device_exit_batch.
7424 */
7425 rtnl_lock_unregistering(net_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007426 list_for_each_entry(net, net_list, exit_list) {
7427 for_each_netdev_reverse(net, dev) {
Jiri Pirkob0ab2fa2014-06-26 09:58:25 +02007428 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007429 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7430 else
7431 unregister_netdevice_queue(dev, &dev_kill_list);
7432 }
7433 }
7434 unregister_netdevice_many(&dev_kill_list);
7435 rtnl_unlock();
7436}
7437
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007438static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007439 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007440 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02007441};
7442
Linus Torvalds1da177e2005-04-16 15:20:36 -07007443/*
7444 * Initialize the DEV module. At boot time this walks the device list and
7445 * unhooks any devices that fail to initialise (normally hardware not
7446 * present) and leaves us with a valid list of present and active devices.
7447 *
7448 */
7449
7450/*
7451 * This is called single threaded during boot, so no need
7452 * to take the rtnl semaphore.
7453 */
7454static int __init net_dev_init(void)
7455{
7456 int i, rc = -ENOMEM;
7457
7458 BUG_ON(!dev_boot_phase);
7459
Linus Torvalds1da177e2005-04-16 15:20:36 -07007460 if (dev_proc_init())
7461 goto out;
7462
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007463 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07007464 goto out;
7465
7466 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08007467 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007468 INIT_LIST_HEAD(&ptype_base[i]);
7469
Vlad Yasevich62532da2012-11-15 08:49:10 +00007470 INIT_LIST_HEAD(&offload_base);
7471
Eric W. Biederman881d9662007-09-17 11:56:21 -07007472 if (register_pernet_subsys(&netdev_net_ops))
7473 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007474
7475 /*
7476 * Initialise the packet receive queues.
7477 */
7478
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07007479 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007480 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007481
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007482 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07007483 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007484 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00007485 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00007486#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007487 sd->csd.func = rps_trigger_softirq;
7488 sd->csd.info = sd;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007489 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07007490#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00007491
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007492 sd->backlog.poll = process_backlog;
7493 sd->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007494 }
7495
Linus Torvalds1da177e2005-04-16 15:20:36 -07007496 dev_boot_phase = 0;
7497
Eric W. Biederman505d4f72008-11-07 22:54:20 -08007498	/* The loopback device is special: if any other network device
 7499	 * is present in a network namespace, the loopback device must
 7500	 * be present too. Since we now dynamically allocate and free the
 7501	 * loopback device, ensure this invariant is maintained by
 7502	 * keeping the loopback device the first device on the
 7503	 * list of network devices, so that the loopback device
 7504	 * is the first device that appears and the last network device
 7505	 * that disappears.
7506 */
7507 if (register_pernet_device(&loopback_net_ops))
7508 goto out;
7509
7510 if (register_pernet_device(&default_device_ops))
7511 goto out;
7512
Carlos R. Mafra962cf362008-05-15 11:15:37 -03007513 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7514 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007515
7516 hotcpu_notifier(dev_cpu_callback, 0);
7517 dst_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007518 rc = 0;
7519out:
7520 return rc;
7521}
7522
7523subsys_initcall(net_dev_init);