/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call per packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See register_netdevice() and unregister_netdevice() for example usage;
 * both must be called with the rtnl semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a packet-mangling protocol handler were
 *	first on the list, it could not tell that the packet is cloned
 *	and should be copied-on-write; it would modify the packet in
 *	place and subsequent readers would see a broken packet.
 *	--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	CPUs that are in the middle of receiving packets will see the
 *	new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
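
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * module-level user of dev_add_pack()/dev_remove_pack().  The handler and
 * variable names are hypothetical; the packet_type fields and the two
 * registration calls are the real API shown above.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);			// consume the packet
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_pt __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),	// or htons(ETH_P_ALL) for a tap
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_pt);	// from module init (does not sleep)
 *	dev_remove_pack(&example_pt);	// from module exit (may sleep)
 */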
Linus Torvalds1da177e2005-04-16 15:20:36 -0700446
Vlad Yasevich62532da2012-11-15 08:49:10 +0000447
448/**
449 * dev_add_offload - register offload handlers
450 * @po: protocol offload declaration
451 *
452 * Add protocol offload handlers to the networking stack. The passed
453 * &proto_offload is linked into kernel lists and may not be freed until
454 * it has been removed from the kernel lists.
455 *
456 * This call does not sleep therefore it can not
457 * guarantee all CPU's that are in middle of receiving packets
458 * will see the new offload handlers (until the next received packet).
459 */
460void dev_add_offload(struct packet_offload *po)
461{
462 struct list_head *head = &offload_base;
463
464 spin_lock(&offload_lock);
465 list_add_rcu(&po->list, head);
466 spin_unlock(&offload_lock);
467}
468EXPORT_SYMBOL(dev_add_offload);
469
470/**
471 * __dev_remove_offload - remove offload handler
472 * @po: packet offload declaration
473 *
474 * Remove a protocol offload handler that was previously added to the
475 * kernel offload handlers by dev_add_offload(). The passed &offload_type
476 * is removed from the kernel lists and can be freed or reused once this
477 * function returns.
478 *
479 * The packet type might still be in use by receivers
480 * and must not be freed until after all the CPU's have gone
481 * through a quiescent state.
482 */
stephen hemminger1d143d92013-12-29 14:01:29 -0800483static void __dev_remove_offload(struct packet_offload *po)
Vlad Yasevich62532da2012-11-15 08:49:10 +0000484{
485 struct list_head *head = &offload_base;
486 struct packet_offload *po1;
487
Eric Dumazetc53aa502012-11-16 08:08:23 +0000488 spin_lock(&offload_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000489
490 list_for_each_entry(po1, head, list) {
491 if (po == po1) {
492 list_del_rcu(&po->list);
493 goto out;
494 }
495 }
496
497 pr_warn("dev_remove_offload: %p not found\n", po);
498out:
Eric Dumazetc53aa502012-11-16 08:08:23 +0000499 spin_unlock(&offload_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000500}
Vlad Yasevich62532da2012-11-15 08:49:10 +0000501
502/**
503 * dev_remove_offload - remove packet offload handler
504 * @po: packet offload declaration
505 *
506 * Remove a packet offload handler that was previously added to the kernel
507 * offload handlers by dev_add_offload(). The passed &offload_type is
508 * removed from the kernel lists and can be freed or reused once this
509 * function returns.
510 *
511 * This call sleeps to guarantee that no CPU is looking at the packet
512 * type after return.
513 */
514void dev_remove_offload(struct packet_offload *po)
515{
516 __dev_remove_offload(po);
517
518 synchronize_net();
519}
520EXPORT_SYMBOL(dev_remove_offload);
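
/*
 * Example (illustrative sketch, not part of the original file): how a
 * protocol might register GRO/GSO offload callbacks.  The callback names
 * are hypothetical and only a subset of the offload_callbacks fields is
 * shown; struct packet_offload and dev_add_offload() are the real API
 * above.
 *
 *	static struct packet_offload example_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment  = example_gso_segment,
 *			.gro_receive  = example_gro_receive,
 *			.gro_complete = example_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&example_offload);	// e.g. from an __init hook
 */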
521
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522/******************************************************************************
523
524 Device Boot-time Settings Routines
525
526*******************************************************************************/
527
528/* Boot time configuration table */
529static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
530
531/**
532 * netdev_boot_setup_add - add new setup entry
533 * @name: name of the device
534 * @map: configured settings for the device
535 *
536 * Adds new setup entry to the dev_boot_setup list. The function
537 * returns 0 on error and 1 on success. This is a generic routine to
538 * all netdevices.
539 */
540static int netdev_boot_setup_add(char *name, struct ifmap *map)
541{
542 struct netdev_boot_setup *s;
543 int i;
544
545 s = dev_boot_setup;
546 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
547 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
548 memset(s[i].name, 0, sizeof(s[i].name));
Wang Chen93b3cff2008-07-01 19:57:19 -0700549 strlcpy(s[i].name, name, IFNAMSIZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 memcpy(&s[i].map, map, sizeof(s[i].map));
551 break;
552 }
553 }
554
555 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
556}
557
558/**
559 * netdev_boot_setup_check - check boot time settings
560 * @dev: the netdevice
561 *
562 * Check boot time settings for the device.
563 * The found settings are set for the device to be used
564 * later in the device probing.
565 * Returns 0 if no settings found, 1 if they are.
566 */
567int netdev_boot_setup_check(struct net_device *dev)
568{
569 struct netdev_boot_setup *s = dev_boot_setup;
570 int i;
571
572 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
573 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
Wang Chen93b3cff2008-07-01 19:57:19 -0700574 !strcmp(dev->name, s[i].name)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700575 dev->irq = s[i].map.irq;
576 dev->base_addr = s[i].map.base_addr;
577 dev->mem_start = s[i].map.mem_start;
578 dev->mem_end = s[i].map.mem_end;
579 return 1;
580 }
581 }
582 return 0;
583}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700584EXPORT_SYMBOL(netdev_boot_setup_check);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585
586
587/**
588 * netdev_boot_base - get address from boot time settings
589 * @prefix: prefix for network device
590 * @unit: id for network device
591 *
592 * Check boot time settings for the base address of device.
593 * The found settings are set for the device to be used
594 * later in the device probing.
595 * Returns 0 if no settings found.
596 */
597unsigned long netdev_boot_base(const char *prefix, int unit)
598{
599 const struct netdev_boot_setup *s = dev_boot_setup;
600 char name[IFNAMSIZ];
601 int i;
602
603 sprintf(name, "%s%d", prefix, unit);
604
605 /*
606 * If device already registered then return base of 1
607 * to indicate not to probe for this interface
608 */
Eric W. Biederman881d9662007-09-17 11:56:21 -0700609 if (__dev_get_by_name(&init_net, name))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610 return 1;
611
612 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
613 if (!strcmp(name, s[i].name))
614 return s[i].map.base_addr;
615 return 0;
616}
617
618/*
619 * Saves at boot time configured settings for any netdevice.
620 */
621int __init netdev_boot_setup(char *str)
622{
623 int ints[5];
624 struct ifmap map;
625
626 str = get_options(str, ARRAY_SIZE(ints), ints);
627 if (!str || !*str)
628 return 0;
629
630 /* Save settings */
631 memset(&map, 0, sizeof(map));
632 if (ints[0] > 0)
633 map.irq = ints[1];
634 if (ints[0] > 1)
635 map.base_addr = ints[2];
636 if (ints[0] > 2)
637 map.mem_start = ints[3];
638 if (ints[0] > 3)
639 map.mem_end = ints[4];
640
641 /* Add new entry to the list */
642 return netdev_boot_setup_add(str, &map);
643}
644
645__setup("netdev=", netdev_boot_setup);
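
/*
 * Example (illustrative only): the "netdev=" boot parameter parsed above
 * takes up to four integers (irq, base_addr, mem_start, mem_end) followed
 * by the device name, e.g. on the kernel command line:
 *
 *	netdev=9,0x340,0,0,eth0
 *
 * which asks the probing code to use IRQ 9 and I/O base 0x340 for eth0.
 * The values here are hypothetical; the format follows get_options() and
 * netdev_boot_setup_add() above.
 */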

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under the RTNL semaphore
 *	or @dev_base_lock. If the name is found, a pointer to the device
 *	is returned. If the name is not found, then %NULL is returned. The
 *	reference counters are not incremented, so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found, a pointer to the device is returned.
 *	If the name is not found, then %NULL is returned.
 *	The reference counters are not incremented, so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
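
/*
 * Example (illustrative only): the three name-lookup flavours above differ
 * in locking.  A sketch of the refcounted variant:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		// ... use dev; the held reference keeps it alive ...
 *		dev_put(dev);
 *	}
 *
 * The _rcu variant must instead be bracketed by rcu_read_lock()/
 * rcu_read_unlock(), and __dev_get_by_name() needs RTNL or dev_base_lock.
 */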

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put() to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
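
/*
 * Example (illustrative only): a lockless read-side lookup by ifindex.
 * The ifindex value 2 is hypothetical.
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(&init_net, 2);
 *	if (dev)
 *		pr_info("ifindex 2 is %s\n", dev->name);
 *	rcu_read_unlock();	// dev must not be used past this point
 */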

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
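
/*
 * Example (illustrative only): looking up a device by MAC address under
 * RCU.  The address bytes are hypothetical.
 *
 *	static const char mac[ETH_ALEN] = {
 *		0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		pr_info("MAC belongs to %s\n", dev->name);
 *	rcu_read_unlock();
 */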

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and the result's refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
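
/*
 * Example (illustrative only): names accepted and rejected by
 * dev_valid_name() per the checks above.
 *
 *	dev_valid_name("eth0")		-> true
 *	dev_valid_name("br-lan")	-> true
 *	dev_valid_name("..")		-> false  (reserved path component)
 *	dev_valid_name("my dev")	-> false  (whitespace)
 *	dev_valid_name("a/b")		-> false  ('/' breaks sysfs paths)
 */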

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be exactly one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
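
/*
 * Example (illustrative only): a driver that registers devices as "foo0",
 * "foo1", ... would do something like the following while holding RTNL;
 * "foo%d" is a hypothetical format string.
 *
 *	ret = dev_alloc_name(dev, "foo%d");
 *	if (ret < 0)
 *		goto err_free;	// -EINVAL, -ENOMEM or -ENFILE
 *	// dev->name now holds e.g. "foo2"; ret is the unit number (2)
 */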

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) - must be at least IFNAMSIZ
 *
 *	Change the name of a device; format strings such as "eth%d" can
 *	be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
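
/*
 * Example (illustrative only): renaming a downed device under RTNL.
 * The names are hypothetical.
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "wan%d");	// or a literal name "wan0"
 *	rtnl_unlock();
 *	if (err < 0)
 *		pr_warn("rename failed: %d\n", err);
 *
 * Note the -EBUSY return above: the interface must be down (IFF_UP clear)
 * before it can be renamed.
 */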

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from @alias
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}
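
/*
 * Example (illustrative only): giving an interface a human-readable
 * description, much as "ip link set dev eth0 alias ..." does via
 * rtnetlink.
 *
 *	static const char desc[] = "uplink to core switch";
 *
 *	rtnl_lock();
 *	err = dev_set_alias(dev, desc, strlen(desc));
 *	rtnl_unlock();	// err is the alias length on success
 */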


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
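
/*
 * Example (illustrative only): a virtualization or bonding driver that
 * has just moved traffic to a new lower device might announce itself so
 * that switches relearn the MAC address:
 *
 *	netdev_notify_peers(bond->dev);	// triggers e.g. gratuitous ARP
 *
 * The bond->dev expression is hypothetical; any live net_device works.
 */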
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001228
Patrick McHardybd380812010-02-26 06:34:53 +00001229static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001231 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001232 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001234 ASSERT_RTNL();
1235
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 if (!netif_device_present(dev))
1237 return -ENODEV;
1238
Neil Hormanca99ca12013-02-05 08:05:43 +00001239 /* Block netpoll from trying to do any rx path servicing.
1240 * If we don't do this there is a chance ndo_poll_controller
1241 * or ndo_poll may be running while we open the device
1242 */
dingtianhongda6e3782013-05-27 19:53:31 +00001243 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001244
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001245 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1246 ret = notifier_to_errno(ret);
1247 if (ret)
1248 return ret;
1249
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001251
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001252 if (ops->ndo_validate_addr)
1253 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001254
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001255 if (!ret && ops->ndo_open)
1256 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257
Neil Hormanca99ca12013-02-05 08:05:43 +00001258 netpoll_rx_enable(dev);
1259
Jeff Garzikbada3392007-10-23 20:19:37 -07001260 if (ret)
1261 clear_bit(__LINK_STATE_START, &dev->state);
1262 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 dev->flags |= IFF_UP;
David S. Millerb4bd07c2009-02-06 22:06:43 -08001264 net_dmaengine_get();
Patrick McHardy4417da62007-06-27 01:28:10 -07001265 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001267 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001269
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 return ret;
1271}
Patrick McHardybd380812010-02-26 06:34:53 +00001272
1273/**
1274 * dev_open - prepare an interface for use.
1275 * @dev: device to open
1276 *
1277 * Takes a device from down to up state. The device's private open
1278 * function is invoked and then the multicast lists are loaded. Finally
1279 * the device is moved into the up state and a %NETDEV_UP message is
1280 * sent to the netdev notifier chain.
1281 *
1282 * Calling this function on an active interface is a nop. On a failure
1283 * a negative errno code is returned.
1284 */
1285int dev_open(struct net_device *dev)
1286{
1287 int ret;
1288
Patrick McHardybd380812010-02-26 06:34:53 +00001289 if (dev->flags & IFF_UP)
1290 return 0;
1291
Patrick McHardybd380812010-02-26 06:34:53 +00001292 ret = __dev_open(dev);
1293 if (ret < 0)
1294 return ret;
1295
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001296 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Patrick McHardybd380812010-02-26 06:34:53 +00001297 call_netdevice_notifiers(NETDEV_UP, dev);
1298
1299 return ret;
1300}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001301EXPORT_SYMBOL(dev_open);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302
Octavian Purdila44345722010-12-13 12:44:07 +00001303static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304{
Octavian Purdila44345722010-12-13 12:44:07 +00001305 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001306
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001307 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001308 might_sleep();
1309
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001310 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001311 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312
Octavian Purdila44345722010-12-13 12:44:07 +00001313 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314
Octavian Purdila44345722010-12-13 12:44:07 +00001315		/* Synchronize to scheduled poll. We cannot touch the poll list; it
1316		 * may even be running on a different cpu. So just clear netif_running().
1317		 *
1318		 * dev->stop() will invoke napi_disable() on all of its
1319		 * napi_struct instances on this device.
1320 */
1321 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1322 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323
Octavian Purdila44345722010-12-13 12:44:07 +00001324 dev_deactivate_many(head);
1325
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001326 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001327 const struct net_device_ops *ops = dev->netdev_ops;
1328
1329 /*
1330		 * Call the device-specific close. This cannot fail, and is
1331		 * only done if the device is UP.
1332 *
1333 * We allow it to be called even after a DETACH hot-plug
1334 * event.
1335 */
1336 if (ops->ndo_stop)
1337 ops->ndo_stop(dev);
1338
Octavian Purdila44345722010-12-13 12:44:07 +00001339 dev->flags &= ~IFF_UP;
Octavian Purdila44345722010-12-13 12:44:07 +00001340 net_dmaengine_put();
1341 }
1342
1343 return 0;
1344}
1345
1346static int __dev_close(struct net_device *dev)
1347{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001348 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001349 LIST_HEAD(single);
1350
Neil Hormanca99ca12013-02-05 08:05:43 +00001351 /* Temporarily disable netpoll until the interface is down */
dingtianhongda6e3782013-05-27 19:53:31 +00001352 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001353
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001354 list_add(&dev->close_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001355 retval = __dev_close_many(&single);
1356 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001357
1358 netpoll_rx_enable(dev);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001359 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001360}
1361
Eric Dumazet3fbd8752011-01-19 21:23:22 +00001362static int dev_close_many(struct list_head *head)
Octavian Purdila44345722010-12-13 12:44:07 +00001363{
1364 struct net_device *dev, *tmp;
Octavian Purdila44345722010-12-13 12:44:07 +00001365
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001366 /* Remove the devices that don't need to be closed */
1367 list_for_each_entry_safe(dev, tmp, head, close_list)
Octavian Purdila44345722010-12-13 12:44:07 +00001368 if (!(dev->flags & IFF_UP))
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001369 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001370
1371 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001372
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001373 list_for_each_entry_safe(dev, tmp, head, close_list) {
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001374 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Octavian Purdila44345722010-12-13 12:44:07 +00001375 call_netdevice_notifiers(NETDEV_DOWN, dev);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001376 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001377 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 return 0;
1380}
Patrick McHardybd380812010-02-26 06:34:53 +00001381
1382/**
1383 * dev_close - shutdown an interface.
1384 * @dev: device to shutdown
1385 *
1386 * This function moves an active device into down state. A
1387 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1388 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1389 * chain.
1390 */
1391int dev_close(struct net_device *dev)
1392{
Eric Dumazete14a5992011-05-10 12:26:06 -07001393 if (dev->flags & IFF_UP) {
1394 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001395
Neil Hormanca99ca12013-02-05 08:05:43 +00001396 /* Block netpoll rx while the interface is going down */
dingtianhongda6e3782013-05-27 19:53:31 +00001397 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001398
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001399 list_add(&dev->close_list, &single);
Eric Dumazete14a5992011-05-10 12:26:06 -07001400 dev_close_many(&single);
1401 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001402
1403 netpoll_rx_enable(dev);
Eric Dumazete14a5992011-05-10 12:26:06 -07001404 }
dingtianhongda6e3782013-05-27 19:53:31 +00001405 return 0;
Patrick McHardybd380812010-02-26 06:34:53 +00001406}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001407EXPORT_SYMBOL(dev_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408
1409
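/*
 * Illustrative sketch, not part of the original file: bouncing an
 * interface from kernel code. Both dev_open() and dev_close() expect
 * the caller to hold the RTNL. The example_* name is made up.
 */
#if 0
static int example_bounce_interface(struct net_device *dev)
{
	int err;

	rtnl_lock();
	dev_close(dev);		/* a nop if the device is already down */
	err = dev_open(dev);	/* returns 0 if the device is already up */
	rtnl_unlock();

	return err;
}
#endif
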
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001410/**
1411 * dev_disable_lro - disable Large Receive Offload on a device
1412 * @dev: device
1413 *
1414 * Disable Large Receive Offload (LRO) on a net device. Must be
1415 * called under RTNL. This is needed if received packets may be
1416 * forwarded to another interface.
1417 */
1418void dev_disable_lro(struct net_device *dev)
1419{
Neil Hormanf11970e2011-05-24 08:31:09 +00001420 /*
1421	 * If we're trying to disable lro on a vlan device,
1422	 * use the underlying physical device instead.
1423 */
1424 if (is_vlan_dev(dev))
1425 dev = vlan_dev_real_dev(dev);
1426
Michal Kubeček529d0482013-11-15 06:18:50 +01001427 /* the same for macvlan devices */
1428 if (netif_is_macvlan(dev))
1429 dev = macvlan_dev_real_dev(dev);
1430
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001431 dev->wanted_features &= ~NETIF_F_LRO;
1432 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001433
Michał Mirosław22d59692011-04-21 12:42:15 +00001434 if (unlikely(dev->features & NETIF_F_LRO))
1435 netdev_WARN(dev, "failed to disable LRO!\n");
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001436}
1437EXPORT_SYMBOL(dev_disable_lro);
1438
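/*
 * Illustrative sketch, not part of the original file: forwarding setup
 * code could use dev_disable_lro() like this before packets received on
 * @dev may be forwarded to another interface. example_* is made up.
 */
#if 0
static void example_enable_forwarding(struct net_device *dev)
{
	ASSERT_RTNL();

	/* LRO-merged frames must not be forwarded, so turn LRO off
	 * while forwarding is enabled on this device.
	 */
	dev_disable_lro(dev);
}
#endif
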
Jiri Pirko351638e2013-05-28 01:30:21 +00001439static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1440 struct net_device *dev)
1441{
1442 struct netdev_notifier_info info;
1443
1444 netdev_notifier_info_init(&info, dev);
1445 return nb->notifier_call(nb, val, &info);
1446}
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001447
Eric W. Biederman881d9662007-09-17 11:56:21 -07001448static int dev_boot_phase = 1;
1449
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450/**
1451 * register_netdevice_notifier - register a network notifier block
1452 * @nb: notifier
1453 *
1454 * Register a notifier to be called when network device events occur.
1455 * The notifier passed is linked into the kernel structures and must
1456 * not be reused until it has been unregistered. A negative errno code
1457 * is returned on a failure.
1458 *
1459 * When registered, all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001460 * to the new notifier to give it a race-free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 * view of the network device list.
1462 */
1463
1464int register_netdevice_notifier(struct notifier_block *nb)
1465{
1466 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001467 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001468 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 int err;
1470
1471 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001472 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001473 if (err)
1474 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001475 if (dev_boot_phase)
1476 goto unlock;
1477 for_each_net(net) {
1478 for_each_netdev(net, dev) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001479 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001480 err = notifier_to_errno(err);
1481 if (err)
1482 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
Eric W. Biederman881d9662007-09-17 11:56:21 -07001484 if (!(dev->flags & IFF_UP))
1485 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001486
Jiri Pirko351638e2013-05-28 01:30:21 +00001487 call_netdevice_notifier(nb, NETDEV_UP, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001488 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001490
1491unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 rtnl_unlock();
1493 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001494
1495rollback:
1496 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001497 for_each_net(net) {
1498 for_each_netdev(net, dev) {
1499 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001500 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001501
Eric W. Biederman881d9662007-09-17 11:56:21 -07001502 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001503 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1504 dev);
1505 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001506 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001507 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001508 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001509 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001510
RongQing.Li8f891482011-11-30 23:43:07 -05001511outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001512 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001513 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001515EXPORT_SYMBOL(register_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516
1517/**
1518 * unregister_netdevice_notifier - unregister a network notifier block
1519 * @nb: notifier
1520 *
1521 * Unregister a notifier previously registered by
1522 * register_netdevice_notifier(). The notifier is unlinked from the
1523 * kernel structures and may then be reused. A negative errno code
1524 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001525 *
1526 * After unregistering unregister and down device events are synthesized
1527 * for all devices on the device list to the removed notifier to remove
1528 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 */
1530
1531int unregister_netdevice_notifier(struct notifier_block *nb)
1532{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001533 struct net_device *dev;
1534 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001535 int err;
1536
1537 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001538 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001539 if (err)
1540 goto unlock;
1541
1542 for_each_net(net) {
1543 for_each_netdev(net, dev) {
1544 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001545 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1546 dev);
1547 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001548 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001549 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001550 }
1551 }
1552unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001553 rtnl_unlock();
1554 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001556EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557
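/*
 * Illustrative sketch, not part of the original file: a minimal module
 * using the two notifier calls above. The callback's last argument is a
 * struct netdev_notifier_info pointer; netdev_notifier_info_to_dev()
 * extracts the device. All example_* names are made up.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_info("%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_netdev_event,
};

static int __init example_init(void)
{
	/* Replays NETDEV_REGISTER/NETDEV_UP for already-present devices. */
	return register_netdevice_notifier(&example_notifier);
}

static void __exit example_exit(void)
{
	/* Synthesizes NETDEV_DOWN/NETDEV_UNREGISTER before removal. */
	unregister_netdevice_notifier(&example_notifier);
}
#endif
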
1558/**
Jiri Pirko351638e2013-05-28 01:30:21 +00001559 * call_netdevice_notifiers_info - call all network notifier blocks
1560 * @val: value passed unmodified to notifier function
1561 * @dev: net_device pointer passed unmodified to notifier function
1562 * @info: notifier information data
1563 *
1564 * Call all network notifier blocks. Parameters and return value
1565 * are as for raw_notifier_call_chain().
1566 */
1567
stephen hemminger1d143d92013-12-29 14:01:29 -08001568static int call_netdevice_notifiers_info(unsigned long val,
1569 struct net_device *dev,
1570 struct netdev_notifier_info *info)
Jiri Pirko351638e2013-05-28 01:30:21 +00001571{
1572 ASSERT_RTNL();
1573 netdev_notifier_info_init(info, dev);
1574 return raw_notifier_call_chain(&netdev_chain, val, info);
1575}
Jiri Pirko351638e2013-05-28 01:30:21 +00001576
1577/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 * call_netdevice_notifiers - call all network notifier blocks
1579 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001580 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 *
1582 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001583 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 */
1585
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001586int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587{
Jiri Pirko351638e2013-05-28 01:30:21 +00001588 struct netdev_notifier_info info;
1589
1590 return call_netdevice_notifiers_info(val, dev, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001592EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593
Ingo Molnarc5905af2012-02-24 08:31:31 +01001594static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001595#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001596/* We are not allowed to call static_key_slow_dec() from irq context
Eric Dumazetb90e5792011-11-28 11:16:50 +00001597 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001598 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001599 */
1600static atomic_t netstamp_needed_deferred;
1601#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602
1603void net_enable_timestamp(void)
1604{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001605#ifdef HAVE_JUMP_LABEL
1606 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1607
1608 if (deferred) {
1609 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001610 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001611 return;
1612 }
1613#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001614 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001616EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617
1618void net_disable_timestamp(void)
1619{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001620#ifdef HAVE_JUMP_LABEL
1621 if (in_interrupt()) {
1622 atomic_inc(&netstamp_needed_deferred);
1623 return;
1624 }
1625#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001626 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001628EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629
Eric Dumazet3b098e22010-05-15 23:57:10 -07001630static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631{
Eric Dumazet588f0332011-11-15 04:12:55 +00001632 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001633 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001634 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635}
1636
Eric Dumazet588f0332011-11-15 04:12:55 +00001637#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001638 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001639 if ((COND) && !(SKB)->tstamp.tv64) \
1640 __net_timestamp(SKB); \
1641 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001642
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001643static inline bool is_skb_forwardable(struct net_device *dev,
1644 struct sk_buff *skb)
1645{
1646 unsigned int len;
1647
1648 if (!(dev->flags & IFF_UP))
1649 return false;
1650
1651 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1652 if (skb->len <= len)
1653 return true;
1654
1655	/* if TSO is enabled, we don't care about the length, as the packet
1656	 * could be forwarded without being segmented first
1657 */
1658 if (skb_is_gso(skb))
1659 return true;
1660
1661 return false;
1662}
1663
Arnd Bergmann44540962009-11-26 06:07:08 +00001664/**
1665 * dev_forward_skb - loopback an skb to another netif
1666 *
1667 * @dev: destination network device
1668 * @skb: buffer to forward
1669 *
1670 * return values:
1671 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001672 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001673 *
1674 * dev_forward_skb can be used for injecting an skb from the
1675 * start_xmit function of one device into the receive queue
1676 * of another device.
1677 *
1678 * The receiving device may be in another namespace, so
1679 * we have to clear all information in the skb that could
1680 * impact namespace isolation.
1681 */
1682int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1683{
Michael S. Tsirkin48c83012011-08-31 08:03:29 +00001684 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1685 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1686 atomic_long_inc(&dev->rx_dropped);
1687 kfree_skb(skb);
1688 return NET_RX_DROP;
1689 }
1690 }
1691
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001692 if (unlikely(!is_skb_forwardable(dev, skb))) {
Eric Dumazetcaf586e2010-09-30 21:06:55 +00001693 atomic_long_inc(&dev->rx_dropped);
Eric Dumazet6ec82562010-05-06 00:53:53 -07001694 kfree_skb(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001695 return NET_RX_DROP;
Eric Dumazet6ec82562010-05-06 00:53:53 -07001696 }
Isaku Yamahata06a23fe2013-07-02 20:30:10 +09001697
Nicolas Dichtel8b27f272013-09-02 15:34:56 +02001698 skb_scrub_packet(skb, true);
Alexei Starovoitov81b9eab2013-11-12 14:39:13 -08001699 skb->protocol = eth_type_trans(skb, dev);
Isaku Yamahata06a23fe2013-07-02 20:30:10 +09001700
Arnd Bergmann44540962009-11-26 06:07:08 +00001701 return netif_rx(skb);
1702}
1703EXPORT_SYMBOL_GPL(dev_forward_skb);
1704
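/*
 * Illustrative sketch, not part of the original file: a veth-style
 * driver's xmit routine handing packets to a peer device. The peer
 * lookup (example_get_peer) is made up; dev_forward_skb() is real.
 */
#if 0
static netdev_tx_t example_peer_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);

	/* dev_forward_skb() scrubs the skb for the namespace crossing
	 * and injects it into @peer's receive path via netif_rx(). It
	 * consumes the skb in all cases.
	 */
	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
#endif
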
Changli Gao71d9dec2010-12-15 19:57:25 +00001705static inline int deliver_skb(struct sk_buff *skb,
1706 struct packet_type *pt_prev,
1707 struct net_device *orig_dev)
1708{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001709 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1710 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001711 atomic_inc(&skb->users);
1712 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1713}
1714
Eric Leblondc0de08d2012-08-16 22:02:58 +00001715static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1716{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001717 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001718 return false;
1719
1720 if (ptype->id_match)
1721 return ptype->id_match(ptype, skb->sk);
1722 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1723 return true;
1724
1725 return false;
1726}
1727
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728/*
1729 * Support routine. Sends outgoing frames to any network
1730 * taps currently in use.
1731 */
1732
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001733static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734{
1735 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001736 struct sk_buff *skb2 = NULL;
1737 struct packet_type *pt_prev = NULL;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001738
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 rcu_read_lock();
1740 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1741 /* Never send packets back to the socket
1742 * they originated from - MvS (miquels@drinkel.ow.org)
1743 */
1744 if ((ptype->dev == dev || !ptype->dev) &&
Eric Leblondc0de08d2012-08-16 22:02:58 +00001745 (!skb_loop_sk(ptype, skb))) {
Changli Gao71d9dec2010-12-15 19:57:25 +00001746 if (pt_prev) {
1747 deliver_skb(skb2, pt_prev, skb->dev);
1748 pt_prev = ptype;
1749 continue;
1750 }
1751
1752 skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 if (!skb2)
1754 break;
1755
Eric Dumazet70978182010-12-20 21:22:51 +00001756 net_timestamp_set(skb2);
1757
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 /* skb->nh should be correctly
1759 set by sender, so that the second statement is
1760 just protection against buggy protocols.
1761 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001762 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001764 if (skb_network_header(skb2) < skb2->data ||
Simon Hormanced14f62013-05-28 20:34:25 +00001765 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
Joe Perchese87cc472012-05-13 21:56:26 +00001766 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1767 ntohs(skb2->protocol),
1768 dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001769 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 }
1771
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001772 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 skb2->pkt_type = PACKET_OUTGOING;
Changli Gao71d9dec2010-12-15 19:57:25 +00001774 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 }
1776 }
Changli Gao71d9dec2010-12-15 19:57:25 +00001777 if (pt_prev)
1778 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 rcu_read_unlock();
1780}
1781
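/*
 * Illustrative sketch, not part of the original file: the taps walked
 * above are installed with dev_add_pack() using ETH_P_ALL. A minimal,
 * hypothetical tap that just drops its clones could look like this.
 */
#if 0
static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	/* The tap owns this clone and must free or consume it. */
	kfree_skb(skb);
	return 0;
}

static struct packet_type example_tap __read_mostly = {
	.type = cpu_to_be16(ETH_P_ALL),	/* match every protocol */
	.func = example_tap_rcv,	/* .dev left NULL: all devices */
};

/* dev_add_pack(&example_tap) installs it; dev_remove_pack() removes it. */
#endif
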
Ben Hutchings2c530402012-07-10 10:55:09 +00001782/**
1783 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001784 * @dev: Network device
1785 * @txq: number of queues available
1786 *
1787 * If real_num_tx_queues is changed the tc mappings may no longer be
1788 * valid. To resolve this verify the tc mapping remains valid and if
1789 * not, NULL the mapping. With no priorities mapping to this
1790 * offset/count pair it will no longer be used. In the worst case, if TC0
1791 * is invalid, nothing can be done, so disable priority mappings. It is
1792 * expected that drivers will fix this mapping if they can before
1793 * calling netif_set_real_num_tx_queues.
1794 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001795static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001796{
1797 int i;
1798 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1799
1800 /* If TC0 is invalidated disable TC mapping */
1801 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001802 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001803 dev->num_tc = 0;
1804 return;
1805 }
1806
1807 /* Invalidated prio to tc mappings set to TC0 */
1808 for (i = 1; i < TC_BITMASK + 1; i++) {
1809 int q = netdev_get_prio_tc_map(dev, i);
1810
1811 tc = &dev->tc_to_txq[q];
1812 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001813 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1814 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001815 netdev_set_prio_tc_map(dev, i, 0);
1816 }
1817 }
1818}
1819
Alexander Duyck537c00d2013-01-10 08:57:02 +00001820#ifdef CONFIG_XPS
1821static DEFINE_MUTEX(xps_map_mutex);
1822#define xmap_dereference(P) \
1823 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1824
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001825static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1826 int cpu, u16 index)
1827{
1828 struct xps_map *map = NULL;
1829 int pos;
1830
1831 if (dev_maps)
1832 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1833
1834 for (pos = 0; map && pos < map->len; pos++) {
1835 if (map->queues[pos] == index) {
1836 if (map->len > 1) {
1837 map->queues[pos] = map->queues[--map->len];
1838 } else {
1839 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1840 kfree_rcu(map, rcu);
1841 map = NULL;
1842 }
1843 break;
1844 }
1845 }
1846
1847 return map;
1848}
1849
Alexander Duyck024e9672013-01-10 08:57:46 +00001850static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001851{
1852 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00001853 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001854 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001855
1856 mutex_lock(&xps_map_mutex);
1857 dev_maps = xmap_dereference(dev->xps_maps);
1858
1859 if (!dev_maps)
1860 goto out_no_maps;
1861
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001862 for_each_possible_cpu(cpu) {
Alexander Duyck024e9672013-01-10 08:57:46 +00001863 for (i = index; i < dev->num_tx_queues; i++) {
1864 if (!remove_xps_queue(dev_maps, cpu, i))
1865 break;
1866 }
1867 if (i == dev->num_tx_queues)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001868 active = true;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001869 }
1870
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001871 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00001872 RCU_INIT_POINTER(dev->xps_maps, NULL);
1873 kfree_rcu(dev_maps, rcu);
1874 }
1875
Alexander Duyck024e9672013-01-10 08:57:46 +00001876 for (i = index; i < dev->num_tx_queues; i++)
1877 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1878 NUMA_NO_NODE);
1879
Alexander Duyck537c00d2013-01-10 08:57:02 +00001880out_no_maps:
1881 mutex_unlock(&xps_map_mutex);
1882}
1883
Alexander Duyck01c5f862013-01-10 08:57:35 +00001884static struct xps_map *expand_xps_map(struct xps_map *map,
1885 int cpu, u16 index)
1886{
1887 struct xps_map *new_map;
1888 int alloc_len = XPS_MIN_MAP_ALLOC;
1889 int i, pos;
1890
1891 for (pos = 0; map && pos < map->len; pos++) {
1892 if (map->queues[pos] != index)
1893 continue;
1894 return map;
1895 }
1896
1897 /* Need to add queue to this CPU's existing map */
1898 if (map) {
1899 if (pos < map->alloc_len)
1900 return map;
1901
1902 alloc_len = map->alloc_len * 2;
1903 }
1904
1905 /* Need to allocate new map to store queue on this CPU's map */
1906 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1907 cpu_to_node(cpu));
1908 if (!new_map)
1909 return NULL;
1910
1911 for (i = 0; i < pos; i++)
1912 new_map->queues[i] = map->queues[i];
1913 new_map->alloc_len = alloc_len;
1914 new_map->len = pos;
1915
1916 return new_map;
1917}
1918
Michael S. Tsirkin35735402013-10-02 09:14:06 +03001919int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
1920 u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001921{
Alexander Duyck01c5f862013-01-10 08:57:35 +00001922 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001923 struct xps_map *map, *new_map;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001924 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001925 int cpu, numa_node_id = -2;
1926 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001927
1928 mutex_lock(&xps_map_mutex);
1929
1930 dev_maps = xmap_dereference(dev->xps_maps);
1931
Alexander Duyck01c5f862013-01-10 08:57:35 +00001932 /* allocate memory for queue storage */
1933 for_each_online_cpu(cpu) {
1934 if (!cpumask_test_cpu(cpu, mask))
1935 continue;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001936
Alexander Duyck01c5f862013-01-10 08:57:35 +00001937 if (!new_dev_maps)
1938 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001939 if (!new_dev_maps) {
1940 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001941 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001942 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001943
1944 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1945 NULL;
1946
1947 map = expand_xps_map(map, cpu, index);
1948 if (!map)
1949 goto error;
1950
1951 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1952 }
1953
1954 if (!new_dev_maps)
1955 goto out_no_new_maps;
1956
1957 for_each_possible_cpu(cpu) {
1958 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1959 /* add queue to CPU maps */
1960 int pos = 0;
1961
1962 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1963 while ((pos < map->len) && (map->queues[pos] != index))
1964 pos++;
1965
1966 if (pos == map->len)
1967 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001968#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00001969 if (numa_node_id == -2)
1970 numa_node_id = cpu_to_node(cpu);
1971 else if (numa_node_id != cpu_to_node(cpu))
1972 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001973#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00001974 } else if (dev_maps) {
1975 /* fill in the new device map from the old device map */
1976 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1977 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00001978 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001979
Alexander Duyck537c00d2013-01-10 08:57:02 +00001980 }
1981
Alexander Duyck01c5f862013-01-10 08:57:35 +00001982 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1983
Alexander Duyck537c00d2013-01-10 08:57:02 +00001984 /* Cleanup old maps */
Alexander Duyck01c5f862013-01-10 08:57:35 +00001985 if (dev_maps) {
1986 for_each_possible_cpu(cpu) {
1987 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1988 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1989 if (map && map != new_map)
1990 kfree_rcu(map, rcu);
1991 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00001992
Alexander Duyck537c00d2013-01-10 08:57:02 +00001993 kfree_rcu(dev_maps, rcu);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001994 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00001995
Alexander Duyck01c5f862013-01-10 08:57:35 +00001996 dev_maps = new_dev_maps;
1997 active = true;
1998
1999out_no_new_maps:
2000 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00002001 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2002 (numa_node_id >= 0) ? numa_node_id :
2003 NUMA_NO_NODE);
2004
Alexander Duyck01c5f862013-01-10 08:57:35 +00002005 if (!dev_maps)
2006 goto out_no_maps;
2007
2008 /* removes queue from unused CPUs */
2009 for_each_possible_cpu(cpu) {
2010 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2011 continue;
2012
2013 if (remove_xps_queue(dev_maps, cpu, index))
2014 active = true;
2015 }
2016
2017 /* free map if not active */
2018 if (!active) {
2019 RCU_INIT_POINTER(dev->xps_maps, NULL);
2020 kfree_rcu(dev_maps, rcu);
2021 }
2022
2023out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00002024 mutex_unlock(&xps_map_mutex);
2025
2026 return 0;
2027error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002028 /* remove any maps that we added */
2029 for_each_possible_cpu(cpu) {
2030 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2031 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2032 NULL;
2033 if (new_map && new_map != map)
2034 kfree(new_map);
2035 }
2036
Alexander Duyck537c00d2013-01-10 08:57:02 +00002037 mutex_unlock(&xps_map_mutex);
2038
Alexander Duyck537c00d2013-01-10 08:57:02 +00002039 kfree(new_dev_maps);
2040 return -ENOMEM;
2041}
2042EXPORT_SYMBOL(netif_set_xps_queue);
2043
2044#endif
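
/*
 * Illustrative sketch, not part of the original file: a driver pinning
 * its TX queues to CPUs, one queue per online CPU, with
 * netif_set_xps_queue(). The example_* name is made up.
 */
#if 0
static void example_setup_xps(struct net_device *dev)
{
	int cpu, q = 0;

	for_each_online_cpu(cpu) {
		if (q >= dev->real_num_tx_queues)
			break;
		if (netif_set_xps_queue(dev, cpumask_of(cpu), q))
			netdev_warn(dev, "XPS setup failed for queue %d\n", q);
		q++;
	}
}
#endif
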
John Fastabendf0796d52010-07-01 13:21:57 +00002045/*
2046 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2047 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2048 */
Tom Herberte6484932010-10-18 18:04:39 +00002049int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002050{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002051 int rc;
2052
Tom Herberte6484932010-10-18 18:04:39 +00002053 if (txq < 1 || txq > dev->num_tx_queues)
2054 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002055
Ben Hutchings5c565802011-02-15 19:39:21 +00002056 if (dev->reg_state == NETREG_REGISTERED ||
2057 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002058 ASSERT_RTNL();
2059
Tom Herbert1d24eb42010-11-21 13:17:27 +00002060 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2061 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002062 if (rc)
2063 return rc;
2064
John Fastabend4f57c082011-01-17 08:06:04 +00002065 if (dev->num_tc)
2066 netif_setup_tc(dev, txq);
2067
Alexander Duyck024e9672013-01-10 08:57:46 +00002068 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002069 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002070#ifdef CONFIG_XPS
2071 netif_reset_xps_queues_gt(dev, txq);
2072#endif
2073 }
John Fastabendf0796d52010-07-01 13:21:57 +00002074 }
Tom Herberte6484932010-10-18 18:04:39 +00002075
2076 dev->real_num_tx_queues = txq;
2077 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002078}
2079EXPORT_SYMBOL(netif_set_real_num_tx_queues);
Denis Vlasenko56079432006-03-29 15:57:29 -08002080
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002081#ifdef CONFIG_RPS
2082/**
2083 * netif_set_real_num_rx_queues - set actual number of RX queues used
2084 * @dev: Network device
2085 * @rxq: Actual number of RX queues
2086 *
2087 * This must be called either with the rtnl_lock held or before
2088 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002089 * negative error code. If called before registration, it always
2090 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002091 */
2092int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2093{
2094 int rc;
2095
Tom Herbertbd25fa72010-10-18 18:00:16 +00002096 if (rxq < 1 || rxq > dev->num_rx_queues)
2097 return -EINVAL;
2098
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002099 if (dev->reg_state == NETREG_REGISTERED) {
2100 ASSERT_RTNL();
2101
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002102 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2103 rxq);
2104 if (rc)
2105 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002106 }
2107
2108 dev->real_num_rx_queues = rxq;
2109 return 0;
2110}
2111EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2112#endif
2113
Ben Hutchings2c530402012-07-10 10:55:09 +00002114/**
2115 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002116 *
2117 * This routine should set an upper limit on the number of RSS queues
2118 * used by default by multiqueue devices.
2119 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002120int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002121{
2122 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2123}
2124EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2125
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002126static inline void __netif_reschedule(struct Qdisc *q)
2127{
2128 struct softnet_data *sd;
2129 unsigned long flags;
2130
2131 local_irq_save(flags);
2132 sd = &__get_cpu_var(softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002133 q->next_sched = NULL;
2134 *sd->output_queue_tailp = q;
2135 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002136 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2137 local_irq_restore(flags);
2138}
2139
David S. Miller37437bb2008-07-16 02:15:04 -07002140void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002141{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002142 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2143 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002144}
2145EXPORT_SYMBOL(__netif_schedule);
2146
Eric Dumazete6247022013-12-05 04:45:08 -08002147struct dev_kfree_skb_cb {
2148 enum skb_free_reason reason;
2149};
2150
2151static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002152{
Eric Dumazete6247022013-12-05 04:45:08 -08002153 return (struct dev_kfree_skb_cb *)skb->cb;
Denis Vlasenko56079432006-03-29 15:57:29 -08002154}
Denis Vlasenko56079432006-03-29 15:57:29 -08002155
Eric Dumazete6247022013-12-05 04:45:08 -08002156void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2157{
2158 unsigned long flags;
2159
2160 if (likely(atomic_read(&skb->users) == 1)) {
2161 smp_rmb();
2162 atomic_set(&skb->users, 0);
2163 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2164 return;
2165 }
2166 get_kfree_skb_cb(skb)->reason = reason;
2167 local_irq_save(flags);
2168 skb->next = __this_cpu_read(softnet_data.completion_queue);
2169 __this_cpu_write(softnet_data.completion_queue, skb);
2170 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2171 local_irq_restore(flags);
2172}
2173EXPORT_SYMBOL(__dev_kfree_skb_irq);
2174
2175void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
Denis Vlasenko56079432006-03-29 15:57:29 -08002176{
2177 if (in_irq() || irqs_disabled())
Eric Dumazete6247022013-12-05 04:45:08 -08002178 __dev_kfree_skb_irq(skb, reason);
Denis Vlasenko56079432006-03-29 15:57:29 -08002179 else
2180 dev_kfree_skb(skb);
2181}
Eric Dumazete6247022013-12-05 04:45:08 -08002182EXPORT_SYMBOL(__dev_kfree_skb_any);
Denis Vlasenko56079432006-03-29 15:57:29 -08002183
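/*
 * Illustrative sketch, not part of the original file: a TX-completion
 * handler freeing skbs via the *_any wrappers around the helpers above,
 * which pick the IRQ-safe path automatically. The example_ring type and
 * example_next_completed() are made up.
 */
#if 0
static void example_tx_complete(struct example_ring *ring)
{
	struct sk_buff *skb;

	while ((skb = example_next_completed(ring)) != NULL) {
		/* Transmitted successfully: mark it consumed rather
		 * than dropped so drop monitors stay quiet.
		 */
		dev_consume_skb_any(skb);
	}
}
#endif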
2184
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002185/**
2186 * netif_device_detach - mark device as removed
2187 * @dev: network device
2188 *
2189 * Mark device as removed from system and therefore no longer available.
2190 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002191void netif_device_detach(struct net_device *dev)
2192{
2193 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2194 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002195 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002196 }
2197}
2198EXPORT_SYMBOL(netif_device_detach);
2199
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002200/**
2201 * netif_device_attach - mark device as attached
2202 * @dev: network device
2203 *
2204 * Mark device as attached from system and restart if needed.
2205 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002206void netif_device_attach(struct net_device *dev)
2207{
2208 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2209 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002210 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002211 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002212 }
2213}
2214EXPORT_SYMBOL(netif_device_attach);
2215
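/*
 * Illustrative sketch, not part of the original file: legacy PCI PM
 * hooks of a hypothetical driver using netif_device_detach()/attach()
 * so the stack stops queueing packets while the hardware is off.
 */
#if 0
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stops all TX queues if running */
	/* ... save state and power the hardware down ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... power up and restore hardware state ... */
	netif_device_attach(dev);	/* restarts queues and the watchdog */
	return 0;
}
#endif
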
Ben Hutchings36c92472012-01-17 07:57:56 +00002216static void skb_warn_bad_offload(const struct sk_buff *skb)
2217{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002218 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002219 struct net_device *dev = skb->dev;
2220 const char *driver = "";
2221
Ben Greearc846ad92013-04-19 10:45:52 +00002222 if (!net_ratelimit())
2223 return;
2224
Ben Hutchings36c92472012-01-17 07:57:56 +00002225 if (dev && dev->dev.parent)
2226 driver = dev_driver_string(dev->dev.parent);
2227
2228 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2229 "gso_type=%d ip_summed=%d\n",
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002230 driver, dev ? &dev->features : &null_features,
2231 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002232 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2233 skb_shinfo(skb)->gso_type, skb->ip_summed);
2234}
2235
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236/*
2237 * Invalidate hardware checksum when packet is to be mangled, and
2238 * complete checksum manually on outgoing path.
2239 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002240int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241{
Al Virod3bc23e2006-11-14 21:24:49 -08002242 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002243 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244
Patrick McHardy84fa7932006-08-29 16:44:56 -07002245 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002246 goto out_set_summed;
2247
2248 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002249 skb_warn_bad_offload(skb);
2250 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 }
2252
Eric Dumazetcef401d2013-01-25 20:34:37 +00002253 /* Before computing a checksum, we should make sure no frag could
2254 * be modified by an external entity : checksum could be wrong.
2255 */
2256 if (skb_has_shared_frag(skb)) {
2257 ret = __skb_linearize(skb);
2258 if (ret)
2259 goto out;
2260 }
2261
Michał Mirosław55508d62010-12-14 15:24:08 +00002262 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002263 BUG_ON(offset >= skb_headlen(skb));
2264 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2265
2266 offset += skb->csum_offset;
2267 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2268
2269 if (skb_cloned(skb) &&
2270 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2272 if (ret)
2273 goto out;
2274 }
2275
Herbert Xua0308472007-10-15 01:47:15 -07002276 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002277out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002279out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 return ret;
2281}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002282EXPORT_SYMBOL(skb_checksum_help);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283
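/*
 * Illustrative sketch, not part of the original file: an xmit path of a
 * hypothetical driver whose hardware cannot checksum a given packet and
 * therefore falls back to skb_checksum_help(). The example_hw_can_csum()
 * and example_queue_for_dma() helpers are made up.
 */
#if 0
static netdev_tx_t example_csum_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !example_hw_can_csum(skb)) {
		/* Complete the checksum in software before DMA. */
		if (skb_checksum_help(skb))
			goto drop;
	}
	return example_queue_for_dma(skb, dev);

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
#endif
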
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002284__be16 skb_network_protocol(struct sk_buff *skb)
2285{
2286 __be16 type = skb->protocol;
David S. Miller61816592013-03-20 12:46:26 -04002287 int vlan_depth = ETH_HLEN;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002288
Pravin B Shelar19acc322013-05-07 20:41:07 +00002289 /* Tunnel gso handlers can set protocol to ethernet. */
2290 if (type == htons(ETH_P_TEB)) {
2291 struct ethhdr *eth;
2292
2293 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2294 return 0;
2295
2296 eth = (struct ethhdr *)skb_mac_header(skb);
2297 type = eth->h_proto;
2298 }
2299
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002300 while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002301 struct vlan_hdr *vh;
2302
2303 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
2304 return 0;
2305
2306 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2307 type = vh->h_vlan_encapsulated_proto;
2308 vlan_depth += VLAN_HLEN;
2309 }
2310
2311 return type;
2312}
2313
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002314/**
2315 * skb_mac_gso_segment - mac layer segmentation handler.
2316 * @skb: buffer to segment
2317 * @features: features for the output path (see dev->features)
2318 */
2319struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2320 netdev_features_t features)
2321{
2322 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2323 struct packet_offload *ptype;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002324 __be16 type = skb_network_protocol(skb);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002325
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002326 if (unlikely(!type))
2327 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002328
2329 __skb_pull(skb, skb->mac_len);
2330
2331 rcu_read_lock();
2332 list_for_each_entry_rcu(ptype, &offload_base, list) {
2333 if (ptype->type == type && ptype->callbacks.gso_segment) {
2334 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2335 int err;
2336
2337 err = ptype->callbacks.gso_send_check(skb);
2338 segs = ERR_PTR(err);
2339 if (err || skb_gso_ok(skb, features))
2340 break;
2341 __skb_push(skb, (skb->data -
2342 skb_network_header(skb)));
2343 }
2344 segs = ptype->callbacks.gso_segment(skb, features);
2345 break;
2346 }
2347 }
2348 rcu_read_unlock();
2349
2350 __skb_push(skb, skb->data - skb_mac_header(skb));
2351
2352 return segs;
2353}
2354EXPORT_SYMBOL(skb_mac_gso_segment);
2355
2356
Cong Wang12b00042013-02-05 16:36:38 +00002357/* openvswitch calls this on the rx path, so we need a different check.
2358 */
2359static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2360{
2361 if (tx_path)
2362 return skb->ip_summed != CHECKSUM_PARTIAL;
2363 else
2364 return skb->ip_summed == CHECKSUM_NONE;
2365}
2366
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002367/**
Cong Wang12b00042013-02-05 16:36:38 +00002368 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002369 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002370 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002371 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002372 *
2373 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002374 *
2375 * It may return NULL if the skb requires no segmentation. This is
2376 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002377 */
Cong Wang12b00042013-02-05 16:36:38 +00002378struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2379 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002380{
Cong Wang12b00042013-02-05 16:36:38 +00002381 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002382 int err;
2383
Ben Hutchings36c92472012-01-17 07:57:56 +00002384 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002385
Herbert Xua430a432006-07-08 13:34:56 -07002386 if (skb_header_cloned(skb) &&
2387 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2388 return ERR_PTR(err);
2389 }
2390
Pravin B Shelar68c33162013-02-14 14:02:41 +00002391 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Eric Dumazet3347c962013-10-19 11:42:56 -07002392 SKB_GSO_CB(skb)->encap_level = 0;
2393
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002394 skb_reset_mac_header(skb);
2395 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002396
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002397 return skb_mac_gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002398}
Cong Wang12b00042013-02-05 16:36:38 +00002399EXPORT_SYMBOL(__skb_gso_segment);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002400
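/*
 * Illustrative sketch, not part of the original file: segmenting a GSO
 * skb by hand, as a tunnel- or openvswitch-like path might, using the
 * skb_gso_segment() tx-path wrapper around __skb_gso_segment(). The
 * example_send_one() helper is made up.
 */
#if 0
static int example_segment_and_send(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs, *nskb;

	segs = skb_gso_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)			/* header check only: send as is */
		return example_send_one(skb);

	consume_skb(skb);		/* segments hold their own refs */
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		example_send_one(segs);
		segs = nskb;
	}
	return 0;
}
#endif
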
Herbert Xufb286bb2005-11-10 13:01:24 -08002401/* Take action when hardware reception checksum errors are detected. */
2402#ifdef CONFIG_BUG
2403void netdev_rx_csum_fault(struct net_device *dev)
2404{
2405 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002406 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002407 dump_stack();
2408 }
2409}
2410EXPORT_SYMBOL(netdev_rx_csum_fault);
2411#endif
2412
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413/* Actually, we should eliminate this check as soon as we know that:
2414 * 1. An IOMMU is present and can map all of the memory.
2415 * 2. No high memory really exists on this machine.
2416 */
2417
Eric Dumazet9092c652010-04-02 13:34:49 -07002418static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002420#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002422 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002423 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2424 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2425 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002426 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002427 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002428 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002430 if (PCI_DMA_BUS_IS_PHYS) {
2431 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432
Eric Dumazet9092c652010-04-02 13:34:49 -07002433 if (!pdev)
2434 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002435 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002436 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2437 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002438 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2439 return 1;
2440 }
2441 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002442#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 return 0;
2444}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002446struct dev_gso_cb {
2447 void (*destructor)(struct sk_buff *skb);
2448};
2449
2450#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2451
2452static void dev_gso_skb_destructor(struct sk_buff *skb)
2453{
2454 struct dev_gso_cb *cb;
2455
Eric Dumazet289dccb2013-12-20 14:29:08 -08002456 kfree_skb_list(skb->next);
2457 skb->next = NULL;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002458
2459 cb = DEV_GSO_CB(skb);
2460 if (cb->destructor)
2461 cb->destructor(skb);
2462}
2463
2464/**
2465 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2466 * @skb: buffer to segment
Jesse Gross91ecb632011-01-09 06:23:33 +00002467 * @features: device features as applicable to this skb
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002468 *
2469 * This function segments the given skb and stores the list of segments
2470 * in skb->next.
2471 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002472static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002473{
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002474 struct sk_buff *segs;
2475
Herbert Xu576a30e2006-06-27 13:22:38 -07002476 segs = skb_gso_segment(skb, features);
2477
2478 /* Verifying header integrity only. */
2479 if (!segs)
2480 return 0;
2481
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07002482 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002483 return PTR_ERR(segs);
2484
2485 skb->next = segs;
2486 DEV_GSO_CB(skb)->destructor = skb->destructor;
2487 skb->destructor = dev_gso_skb_destructor;
2488
2489 return 0;
2490}
2491
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002492static netdev_features_t harmonize_features(struct sk_buff *skb,
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002493 netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002494{
Ed Cashinc0d680e2012-09-19 15:49:00 +00002495 if (skb->ip_summed != CHECKSUM_NONE &&
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002496 !can_checksum_protocol(features, skb_network_protocol(skb))) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002497 features &= ~NETIF_F_ALL_CSUM;
Jesse Grossf01a5232011-01-09 06:23:31 +00002498 } else if (illegal_highdma(skb->dev, skb)) {
2499 features &= ~NETIF_F_SG;
2500 }
2501
2502 return features;
2503}
2504
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002505netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002506{
2507 __be16 protocol = skb->protocol;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002508 netdev_features_t features = skb->dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002509
Ben Hutchings30b678d2012-07-30 15:57:00 +00002510 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2511 features &= ~NETIF_F_GSO_MASK;
2512
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002513 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
Jesse Gross58e998c2010-10-29 12:14:55 +00002514 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2515 protocol = veh->h_vlan_encapsulated_proto;
Jesse Grossf01a5232011-01-09 06:23:31 +00002516 } else if (!vlan_tx_tag_present(skb)) {
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002517 return harmonize_features(skb, features);
Jesse Grossf01a5232011-01-09 06:23:31 +00002518 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002519
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002520 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2521 NETIF_F_HW_VLAN_STAG_TX);
Jesse Grossf01a5232011-01-09 06:23:31 +00002522
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002523 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
Jesse Grossf01a5232011-01-09 06:23:31 +00002524 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002525 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2526 NETIF_F_HW_VLAN_STAG_TX;
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002527
2528 return harmonize_features(skb, features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002529}
Jesse Grossf01a5232011-01-09 06:23:31 +00002530EXPORT_SYMBOL(netif_skb_features);
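/*
 * Illustrative sketch (assumes transmit-path context): callers
 * typically ask netif_skb_features() which offloads remain valid for
 * this particular skb, then fall back to software GSO when the device
 * cannot segment the skb itself.
 */
static bool example_needs_sw_gso(struct sk_buff *skb)
{
	netdev_features_t features = netif_skb_features(skb);

	return netif_needs_gso(skb, features);
}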
Jesse Gross58e998c2010-10-29 12:14:55 +00002531
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002532int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002533 struct netdev_queue *txq, void *accel_priv)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002534{
Stephen Hemminger00829822008-11-20 20:14:53 -08002535 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00002536 int rc = NETDEV_TX_OK;
Koki Sanagiec764bf2011-05-30 21:48:34 +00002537 unsigned int skb_len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002538
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002539 if (likely(!skb->next)) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002540 netdev_features_t features;
Jesse Grossfc741212011-01-09 06:23:32 +00002541
Eric Dumazet93f154b2009-05-18 22:19:19 -07002542 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002543		 * If the device doesn't need skb->dst, release it right now while
Eric Dumazet93f154b2009-05-18 22:19:19 -07002544		 * it's still hot in this CPU's cache
2545 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002546 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2547 skb_dst_drop(skb);
2548
Jesse Grossfc741212011-01-09 06:23:32 +00002549 features = netif_skb_features(skb);
2550
Jesse Gross7b9c6092010-10-20 13:56:04 +00002551 if (vlan_tx_tag_present(skb) &&
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002552 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2553 skb = __vlan_put_tag(skb, skb->vlan_proto,
2554 vlan_tx_tag_get(skb));
Jesse Gross7b9c6092010-10-20 13:56:04 +00002555 if (unlikely(!skb))
2556 goto out;
2557
2558 skb->vlan_tci = 0;
2559 }
2560
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002561		/* If this is an encapsulation offload request, verify that we
2562		 * are testing the hardware encapsulation features instead of
2563		 * the standard features for the netdev
2564		 */
2565 if (skb->encapsulation)
2566 features &= dev->hw_enc_features;
2567
Jesse Grossfc741212011-01-09 06:23:32 +00002568 if (netif_needs_gso(skb, features)) {
Jesse Gross91ecb632011-01-09 06:23:33 +00002569 if (unlikely(dev_gso_segment(skb, features)))
David S. Miller9ccb8972010-04-22 01:02:07 -07002570 goto out_kfree_skb;
2571 if (skb->next)
2572 goto gso;
John Fastabend6afff0c2010-06-16 14:18:12 +00002573 } else {
Jesse Gross02932ce2011-01-09 06:23:34 +00002574 if (skb_needs_linearize(skb, features) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002575 __skb_linearize(skb))
2576 goto out_kfree_skb;
2577
2578 /* If packet is not checksummed and device does not
2579 * support checksumming for this protocol, complete
2580 * checksumming here.
2581 */
2582 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002583 if (skb->encapsulation)
2584 skb_set_inner_transport_header(skb,
2585 skb_checksum_start_offset(skb));
2586 else
2587 skb_set_transport_header(skb,
2588 skb_checksum_start_offset(skb));
Jesse Gross03634662011-01-09 06:23:35 +00002589 if (!(features & NETIF_F_ALL_CSUM) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002590 skb_checksum_help(skb))
2591 goto out_kfree_skb;
2592 }
David S. Miller9ccb8972010-04-22 01:02:07 -07002593 }
2594
Eric Dumazetb40863c2012-09-18 20:44:49 +00002595 if (!list_empty(&ptype_all))
2596 dev_queue_xmit_nit(skb, dev);
2597
Koki Sanagiec764bf2011-05-30 21:48:34 +00002598 skb_len = skb->len;
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002599 if (accel_priv)
2600 rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
2601 else
2602 rc = ops->ndo_start_xmit(skb, dev);
2603
Koki Sanagiec764bf2011-05-30 21:48:34 +00002604 trace_net_dev_xmit(skb, rc, dev, skb_len);
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002605 if (rc == NETDEV_TX_OK && txq)
Eric Dumazet08baf562009-05-25 22:58:01 -07002606 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00002607 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002608 }
2609
Herbert Xu576a30e2006-06-27 13:22:38 -07002610gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002611 do {
2612 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002613
2614 skb->next = nskb->next;
2615 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00002616
Eric Dumazetb40863c2012-09-18 20:44:49 +00002617 if (!list_empty(&ptype_all))
2618 dev_queue_xmit_nit(nskb, dev);
2619
Koki Sanagiec764bf2011-05-30 21:48:34 +00002620 skb_len = nskb->len;
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002621 if (accel_priv)
2622 rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
2623 else
2624 rc = ops->ndo_start_xmit(nskb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002625 trace_net_dev_xmit(nskb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002626 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002627 if (rc & ~NETDEV_TX_MASK)
2628 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07002629 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002630 skb->next = nskb;
2631 return rc;
2632 }
Eric Dumazet08baf562009-05-25 22:58:01 -07002633 txq_trans_update(txq);
Tom Herbert734664982011-11-28 16:32:44 +00002634 if (unlikely(netif_xmit_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07002635 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002636 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002637
Patrick McHardy572a9d72009-11-10 06:14:14 +00002638out_kfree_gso_skb:
Sridhar Samudrala0c772152013-04-29 13:02:42 +00002639 if (likely(skb->next == NULL)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002640 skb->destructor = DEV_GSO_CB(skb)->destructor;
Sridhar Samudrala0c772152013-04-29 13:02:42 +00002641 consume_skb(skb);
2642 return rc;
2643 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002644out_kfree_skb:
2645 kfree_skb(skb);
Jesse Gross7b9c6092010-10-20 13:56:04 +00002646out:
Patrick McHardy572a9d72009-11-10 06:14:14 +00002647 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002648}
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002649EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
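/*
 * Illustrative sketch (not part of the original source): callers of
 * dev_hard_start_xmit() own the tx queue lock and check the queue
 * state first, mirroring dev_queue_xmit() further below. The real path
 * uses HARD_TX_LOCK(), which also skips the lock for NETIF_F_LLTX
 * devices; this sketch takes the queue lock unconditionally.
 */
static int example_direct_xmit(struct sk_buff *skb, struct net_device *dev,
			       struct netdev_queue *txq)
{
	int rc = NETDEV_TX_BUSY;

	__netif_tx_lock(txq, smp_processor_id());	/* BHs must be off */
	if (!netif_xmit_stopped(txq))
		rc = dev_hard_start_xmit(skb, dev, txq, NULL);
	__netif_tx_unlock(txq);
	return rc;
}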
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002650
Eric Dumazet1def9232013-01-10 12:36:42 +00002651static void qdisc_pkt_len_init(struct sk_buff *skb)
2652{
2653 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2654
2655 qdisc_skb_cb(skb)->pkt_len = skb->len;
2656
2657	 /* To get a more precise estimate of the bytes sent on the wire,
2658	 * we add the header size of every segment to pkt_len
2659 */
2660 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08002661 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00002662 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00002663
Eric Dumazet757b8b12013-01-15 21:14:21 -08002664 /* mac layer + network layer */
2665 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2666
2667 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00002668 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2669 hdr_len += tcp_hdrlen(skb);
2670 else
2671 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00002672
2673 if (shinfo->gso_type & SKB_GSO_DODGY)
2674 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2675 shinfo->gso_size);
2676
2677 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00002678 }
2679}
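/*
 * Worked example (illustrative numbers): a TCP GSO skb with
 * skb->len = 65226, gso_size = 1448 and hdr_len = 66 (14 bytes
 * ethernet + 20 IP + 32 TCP) covers gso_segs = 45 wire packets, so the
 * estimate becomes 65226 + (45 - 1) * 66 = 68130 bytes on the wire.
 */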
2680
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002681static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2682 struct net_device *dev,
2683 struct netdev_queue *txq)
2684{
2685 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002686 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002687 int rc;
2688
Eric Dumazet1def9232013-01-10 12:36:42 +00002689 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002690 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002691 /*
2692 * Heuristic to force contended enqueues to serialize on a
2693 * separate lock before trying to take the qdisc main lock.
2694 * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
2695 * and dequeue packets faster.
2696 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002697 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002698 if (unlikely(contended))
2699 spin_lock(&q->busylock);
2700
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002701 spin_lock(root_lock);
2702 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2703 kfree_skb(skb);
2704 rc = NET_XMIT_DROP;
2705 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002706 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002707 /*
2708 * This is a work-conserving queue; there are no old skbs
2709 * waiting to be sent out; and the qdisc is not running -
2710 * xmit the skb directly.
2711 */
Eric Dumazet7fee2262010-05-11 23:19:48 +00002712 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2713 skb_dst_force(skb);
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002714
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002715 qdisc_bstats_update(q, skb);
2716
Eric Dumazet79640a42010-06-02 05:09:29 -07002717 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2718 if (unlikely(contended)) {
2719 spin_unlock(&q->busylock);
2720 contended = false;
2721 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002722 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002723 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002724 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002725
2726 rc = NET_XMIT_SUCCESS;
2727 } else {
Eric Dumazet7fee2262010-05-11 23:19:48 +00002728 skb_dst_force(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002729 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002730 if (qdisc_run_begin(q)) {
2731 if (unlikely(contended)) {
2732 spin_unlock(&q->busylock);
2733 contended = false;
2734 }
2735 __qdisc_run(q);
2736 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002737 }
2738 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002739 if (unlikely(contended))
2740 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002741 return rc;
2742}
2743
Daniel Borkmann86f85152013-12-29 17:27:11 +01002744#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Neil Horman5bc14212011-11-22 05:10:51 +00002745static void skb_update_prio(struct sk_buff *skb)
2746{
Igor Maravic6977a792011-11-25 07:44:54 +00002747 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002748
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002749 if (!skb->priority && skb->sk && map) {
2750 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2751
2752 if (prioidx < map->priomap_len)
2753 skb->priority = map->priomap[prioidx];
2754 }
Neil Horman5bc14212011-11-22 05:10:51 +00002755}
2756#else
2757#define skb_update_prio(skb)
2758#endif
2759
Eric Dumazet745e20f2010-09-29 13:23:09 -07002760static DEFINE_PER_CPU(int, xmit_recursion);
David S. Miller11a766c2010-10-25 12:51:55 -07002761#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002762
Dave Jonesd29f7492008-07-22 14:09:06 -07002763/**
Michel Machado95603e22012-06-12 10:16:35 +00002764 * dev_loopback_xmit - loop back @skb
2765 * @skb: buffer to transmit
2766 */
2767int dev_loopback_xmit(struct sk_buff *skb)
2768{
2769 skb_reset_mac_header(skb);
2770 __skb_pull(skb, skb_network_offset(skb));
2771 skb->pkt_type = PACKET_LOOPBACK;
2772 skb->ip_summed = CHECKSUM_UNNECESSARY;
2773 WARN_ON(!skb_dst(skb));
2774 skb_dst_force(skb);
2775 netif_rx_ni(skb);
2776 return 0;
2777}
2778EXPORT_SYMBOL(dev_loopback_xmit);
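/*
 * Illustrative sketch (not part of the original source): the protocol
 * output paths use dev_loopback_xmit() to feed a copy of a locally
 * generated multicast packet back to the stack. The copy must already
 * carry skb->dev and a dst, as the WARN_ON() above enforces.
 */
static int example_loop_copy(struct sk_buff *skb)
{
	struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);

	return copy ? dev_loopback_xmit(copy) : -ENOMEM;
}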
2779
2780/**
Dave Jonesd29f7492008-07-22 14:09:06 -07002781 * dev_queue_xmit - transmit a buffer
2782 * @skb: buffer to transmit
2783 *
2784 * Queue a buffer for transmission to a network device. The caller must
2785 * have set the device and priority and built the buffer before calling
2786 * this function. The function can be called from an interrupt.
2787 *
2788 * A negative errno code is returned on a failure. A success does not
2789 * guarantee the frame will be transmitted as it may be dropped due
2790 * to congestion or traffic shaping.
2791 *
2792 * -----------------------------------------------------------------------------------
2793 * Note that this function can also return errors from the queue
2794 * disciplines, including NET_XMIT_DROP, which is a positive value. So
2795 * errors can also be positive.
2796 *
2797 * Regardless of the return value, the skb is consumed, so it is currently
2798 * difficult to retry a send to this method. (You can bump the ref count
2799 * before sending to hold a reference for retry if you are careful.)
2800 *
2801 * When calling this method, interrupts MUST be enabled. This is because
2802 * the BH enable code must have IRQs enabled so that it will not deadlock.
2803 * --BLG
2804 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805int dev_queue_xmit(struct sk_buff *skb)
2806{
2807 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002808 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 struct Qdisc *q;
2810 int rc = -ENOMEM;
2811
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00002812 skb_reset_mac_header(skb);
2813
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002814 /* Disable soft irqs for various locks below. Also
2815 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002817 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818
Neil Horman5bc14212011-11-22 05:10:51 +00002819 skb_update_prio(skb);
2820
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002821 txq = netdev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002822 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002823
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002825 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002827 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002829 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002830 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831 }
2832
2833 /* The device has no queue. Common case for software devices:
2834 loopback, all sorts of tunnels...
2835
Herbert Xu932ff272006-06-09 12:20:56 -07002836 Really, it is unlikely that netif_tx_lock protection is necessary
2837 here. (f.e. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 counters.)
2839 However, it is possible that they rely on the protection
2840 we provide here.
2841
2842 Check this and take the lock. It is not prone to deadlocks.
2843 Or just shoot the noqueue qdisc; that is even simpler 8)
2844 */
2845 if (dev->flags & IFF_UP) {
2846 int cpu = smp_processor_id(); /* ok because BHs are off */
2847
David S. Millerc773e842008-07-08 23:13:53 -07002848 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849
Eric Dumazet745e20f2010-09-29 13:23:09 -07002850 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2851 goto recursion_alert;
2852
David S. Millerc773e842008-07-08 23:13:53 -07002853 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854
Tom Herbert734664982011-11-28 16:32:44 +00002855 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002856 __this_cpu_inc(xmit_recursion);
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002857 rc = dev_hard_start_xmit(skb, dev, txq, NULL);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002858 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002859 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002860 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 goto out;
2862 }
2863 }
David S. Millerc773e842008-07-08 23:13:53 -07002864 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00002865 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2866 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 } else {
2868			/* Recursion detected! This is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07002869			 * unfortunately
2870 */
2871recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00002872 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2873 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 }
2875 }
2876
2877 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002878 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 kfree_skb(skb);
2881 return rc;
2882out:
Herbert Xud4828d82006-06-22 02:28:18 -07002883 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884 return rc;
2885}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002886EXPORT_SYMBOL(dev_queue_xmit);
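/*
 * Illustrative sketch (hypothetical caller): a minimal sender sets the
 * device and priority, reserves link-layer headroom and queues the
 * frame. @frame must already contain the link-layer header, and the
 * skb is consumed whatever the outcome is.
 */
static int example_send_frame(struct net_device *dev,
			      const void *frame, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + len,
					GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), frame, len);
	skb->dev = dev;
	skb->priority = TC_PRIO_CONTROL;
	return dev_queue_xmit(skb);	/* may return positive NET_XMIT_* codes */
}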
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887
2888
2889/*=======================================================================
2890 Receiver routines
2891 =======================================================================*/
2892
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002893int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00002894EXPORT_SYMBOL(netdev_max_backlog);
2895
Eric Dumazet3b098e22010-05-15 23:57:10 -07002896int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002897int netdev_budget __read_mostly = 300;
2898int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07002900/* Called with irq disabled */
2901static inline void ____napi_schedule(struct softnet_data *sd,
2902 struct napi_struct *napi)
2903{
2904 list_add_tail(&napi->poll_list, &sd->poll_list);
2905 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2906}
2907
Eric Dumazetdf334542010-03-24 19:13:54 +00002908#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07002909
2910/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002911struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07002912EXPORT_SYMBOL(rps_sock_flow_table);
2913
Ingo Molnarc5905af2012-02-24 08:31:31 +01002914struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00002915
Ben Hutchingsc4454772011-01-19 11:03:53 +00002916static struct rps_dev_flow *
2917set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2918 struct rps_dev_flow *rflow, u16 next_cpu)
2919{
Ben Hutchings09994d12011-10-03 04:42:46 +00002920 if (next_cpu != RPS_NO_CPU) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00002921#ifdef CONFIG_RFS_ACCEL
2922 struct netdev_rx_queue *rxqueue;
2923 struct rps_dev_flow_table *flow_table;
2924 struct rps_dev_flow *old_rflow;
2925 u32 flow_id;
2926 u16 rxq_index;
2927 int rc;
2928
2929 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00002930 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2931 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00002932 goto out;
2933 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2934 if (rxq_index == skb_get_rx_queue(skb))
2935 goto out;
2936
2937 rxqueue = dev->_rx + rxq_index;
2938 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2939 if (!flow_table)
2940 goto out;
2941 flow_id = skb->rxhash & flow_table->mask;
2942 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2943 rxq_index, flow_id);
2944 if (rc < 0)
2945 goto out;
2946 old_rflow = rflow;
2947 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00002948 rflow->filter = rc;
2949 if (old_rflow->filter == rflow->filter)
2950 old_rflow->filter = RPS_NO_FILTER;
2951 out:
2952#endif
2953 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00002954 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002955 }
2956
Ben Hutchings09994d12011-10-03 04:42:46 +00002957 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002958 return rflow;
2959}
2960
Tom Herbert0a9627f2010-03-16 08:03:29 +00002961/*
2962 * get_rps_cpu is called from netif_receive_skb and returns the target
2963 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002964 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00002965 */
Tom Herbertfec5e652010-04-16 16:01:27 -07002966static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2967 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00002968{
Tom Herbert0a9627f2010-03-16 08:03:29 +00002969 struct netdev_rx_queue *rxqueue;
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002970 struct rps_map *map;
Tom Herbertfec5e652010-04-16 16:01:27 -07002971 struct rps_dev_flow_table *flow_table;
2972 struct rps_sock_flow_table *sock_flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002973 int cpu = -1;
Tom Herbertfec5e652010-04-16 16:01:27 -07002974 u16 tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002975
Tom Herbert0a9627f2010-03-16 08:03:29 +00002976 if (skb_rx_queue_recorded(skb)) {
2977 u16 index = skb_get_rx_queue(skb);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002978 if (unlikely(index >= dev->real_num_rx_queues)) {
2979 WARN_ONCE(dev->real_num_rx_queues > 1,
2980 "%s received packet on queue %u, but number "
2981 "of RX queues is %u\n",
2982 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00002983 goto done;
2984 }
2985 rxqueue = dev->_rx + index;
2986 } else
2987 rxqueue = dev->_rx;
2988
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002989 map = rcu_dereference(rxqueue->rps_map);
2990 if (map) {
Tom Herbert85875232011-01-31 16:23:42 -08002991 if (map->len == 1 &&
Eric Dumazet33d480c2011-08-11 19:30:52 +00002992 !rcu_access_pointer(rxqueue->rps_flow_table)) {
Changli Gao6febfca2010-09-03 23:12:37 +00002993 tcpu = map->cpus[0];
2994 if (cpu_online(tcpu))
2995 cpu = tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002996 goto done;
Eric Dumazetb249dcb2010-04-19 21:56:38 +00002997 }
Eric Dumazet33d480c2011-08-11 19:30:52 +00002998 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00002999 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003000 }
3001
Changli Gao2d47b452010-08-17 19:00:56 +00003002 skb_reset_network_header(skb);
Tom Herbert3958afa1b2013-12-15 22:12:06 -08003003 if (!skb_get_hash(skb))
Tom Herbert0a9627f2010-03-16 08:03:29 +00003004 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003005
Tom Herbertfec5e652010-04-16 16:01:27 -07003006 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3007 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3008 if (flow_table && sock_flow_table) {
3009 u16 next_cpu;
3010 struct rps_dev_flow *rflow;
3011
3012 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
3013 tcpu = rflow->cpu;
3014
3015 next_cpu = sock_flow_table->ents[skb->rxhash &
3016 sock_flow_table->mask];
3017
3018 /*
3019 * If the desired CPU (where last recvmsg was done) is
3020 * different from current CPU (one in the rx-queue flow
3021 * table entry), switch if one of the following holds:
3022 * - Current CPU is unset (equal to RPS_NO_CPU).
3023 * - Current CPU is offline.
3024 * - The current CPU's queue tail has advanced beyond the
3025 * last packet that was enqueued using this table entry.
3026 * This guarantees that all previous packets for the flow
3027 * have been dequeued, thus preserving in order delivery.
3028 */
3029 if (unlikely(tcpu != next_cpu) &&
3030 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3031 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003032 rflow->last_qtail)) >= 0)) {
3033 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003034 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003035 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003036
Tom Herbertfec5e652010-04-16 16:01:27 -07003037 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3038 *rflowp = rflow;
3039 cpu = tcpu;
3040 goto done;
3041 }
3042 }
3043
Tom Herbert0a9627f2010-03-16 08:03:29 +00003044 if (map) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003045 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003046
3047 if (cpu_online(tcpu)) {
3048 cpu = tcpu;
3049 goto done;
3050 }
3051 }
3052
3053done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003054 return cpu;
3055}
3056
Ben Hutchingsc4454772011-01-19 11:03:53 +00003057#ifdef CONFIG_RFS_ACCEL
3058
3059/**
3060 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3061 * @dev: Device on which the filter was set
3062 * @rxq_index: RX queue index
3063 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3064 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3065 *
3066 * Drivers that implement ndo_rx_flow_steer() should periodically call
3067 * this function for each installed filter and remove the filters for
3068 * which it returns %true.
3069 */
3070bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3071 u32 flow_id, u16 filter_id)
3072{
3073 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3074 struct rps_dev_flow_table *flow_table;
3075 struct rps_dev_flow *rflow;
3076 bool expire = true;
3077 int cpu;
3078
3079 rcu_read_lock();
3080 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3081 if (flow_table && flow_id <= flow_table->mask) {
3082 rflow = &flow_table->flows[flow_id];
3083 cpu = ACCESS_ONCE(rflow->cpu);
3084 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3085 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3086 rflow->last_qtail) <
3087 (int)(10 * flow_table->mask)))
3088 expire = false;
3089 }
3090 rcu_read_unlock();
3091 return expire;
3092}
3093EXPORT_SYMBOL(rps_may_expire_flow);
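/*
 * Illustrative sketch (hypothetical driver bookkeeping): a driver that
 * implements ndo_rx_flow_steer() scans its filter table periodically
 * and removes hardware filters once rps_may_expire_flow() reports that
 * the stack no longer cares about them. Here the filter ID is assumed
 * to be the table index.
 */
struct example_steer_entry {
	u16 rxq_index;
	u32 flow_id;
	bool installed;
};

static void example_expire_filters(struct net_device *dev,
				   struct example_steer_entry *tbl, u16 n)
{
	u16 i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].installed)
			continue;
		if (rps_may_expire_flow(dev, tbl[i].rxq_index,
					tbl[i].flow_id, i)) {
			/* a real driver would also clear the HW filter here */
			tbl[i].installed = false;
		}
	}
}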
3094
3095#endif /* CONFIG_RFS_ACCEL */
3096
Tom Herbert0a9627f2010-03-16 08:03:29 +00003097/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003098static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003099{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003100 struct softnet_data *sd = data;
3101
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003102 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003103 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003104}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003105
Tom Herbertfec5e652010-04-16 16:01:27 -07003106#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003107
3108/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003109 * Check if this softnet_data structure belongs to another CPU:
3110 * If yes, queue it to our IPI list and return 1
3111 * If no, return 0
3112 */
3113static int rps_ipi_queued(struct softnet_data *sd)
3114{
3115#ifdef CONFIG_RPS
3116 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3117
3118 if (sd != mysd) {
3119 sd->rps_ipi_next = mysd->rps_ipi_list;
3120 mysd->rps_ipi_list = sd;
3121
3122 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3123 return 1;
3124 }
3125#endif /* CONFIG_RPS */
3126 return 0;
3127}
3128
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003129#ifdef CONFIG_NET_FLOW_LIMIT
3130int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3131#endif
3132
3133static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3134{
3135#ifdef CONFIG_NET_FLOW_LIMIT
3136 struct sd_flow_limit *fl;
3137 struct softnet_data *sd;
3138 unsigned int old_flow, new_flow;
3139
3140 if (qlen < (netdev_max_backlog >> 1))
3141 return false;
3142
3143 sd = &__get_cpu_var(softnet_data);
3144
3145 rcu_read_lock();
3146 fl = rcu_dereference(sd->flow_limit);
3147 if (fl) {
Tom Herbert3958afa1b2013-12-15 22:12:06 -08003148 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003149 old_flow = fl->history[fl->history_head];
3150 fl->history[fl->history_head] = new_flow;
3151
3152 fl->history_head++;
3153 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3154
3155 if (likely(fl->buckets[old_flow]))
3156 fl->buckets[old_flow]--;
3157
3158 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3159 fl->count++;
3160 rcu_read_unlock();
3161 return true;
3162 }
3163 }
3164 rcu_read_unlock();
3165#endif
3166 return false;
3167}
3168
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003169/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003170 * enqueue_to_backlog is called to queue an skb on a per-CPU backlog
3171 * queue (may be a remote CPU queue).
3172 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003173static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3174 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003175{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003176 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003177 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003178 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003179
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003180 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003181
3182 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003183
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003184 rps_lock(sd);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003185 qlen = skb_queue_len(&sd->input_pkt_queue);
3186 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Changli Gao6e7676c2010-04-27 15:07:33 -07003187 if (skb_queue_len(&sd->input_pkt_queue)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003188enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003189 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003190 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003191 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003192 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003193 return NET_RX_SUCCESS;
3194 }
3195
Eric Dumazetebda37c22010-05-06 23:51:21 +00003196 /* Schedule NAPI for backlog device
3197	 * We can use a non-atomic operation since we own the queue lock
3198 */
3199 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003200 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003201 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003202 }
3203 goto enqueue;
3204 }
3205
Changli Gaodee42872010-05-02 05:42:16 +00003206 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003207 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003208
Tom Herbert0a9627f2010-03-16 08:03:29 +00003209 local_irq_restore(flags);
3210
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003211 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003212 kfree_skb(skb);
3213 return NET_RX_DROP;
3214}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215
Linus Torvalds1da177e2005-04-16 15:20:36 -07003216/**
3217 * netif_rx - post buffer to the network code
3218 * @skb: buffer to post
3219 *
3220 * This function receives a packet from a device driver and queues it for
3221 * the upper (protocol) levels to process. It always succeeds. The buffer
3222 * may be dropped during processing for congestion control or by the
3223 * protocol layers.
3224 *
3225 * return values:
3226 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227 * NET_RX_DROP (packet was dropped)
3228 *
3229 */
3230
3231int netif_rx(struct sk_buff *skb)
3232{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003233 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234
3235 /* if netpoll wants it, pretend we never saw it */
3236 if (netpoll_rx(skb))
3237 return NET_RX_DROP;
3238
Eric Dumazet588f0332011-11-15 04:12:55 +00003239 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240
Koki Sanagicf66ba52010-08-23 18:45:02 +09003241 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003242#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003243 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003244 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003245 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246
Changli Gaocece1942010-08-07 20:35:43 -07003247 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003248 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003249
3250 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003251 if (cpu < 0)
3252 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003253
3254 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3255
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003256 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003257 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003258 } else
3259#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003260 {
3261 unsigned int qtail;
3262 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3263 put_cpu();
3264 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003265 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003267EXPORT_SYMBOL(netif_rx);
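/*
 * Illustrative sketch (hypothetical receive buffer): the classic
 * interrupt-time receive path of a non-NAPI driver copies the frame
 * into a fresh skb and posts it with netif_rx().
 */
static void example_rx_interrupt(struct net_device *dev,
				 const void *data, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb)
		return;		/* drop; a real driver bumps rx_dropped */
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}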
Linus Torvalds1da177e2005-04-16 15:20:36 -07003268
3269int netif_rx_ni(struct sk_buff *skb)
3270{
3271 int err;
3272
3273 preempt_disable();
3274 err = netif_rx(skb);
3275 if (local_softirq_pending())
3276 do_softirq();
3277 preempt_enable();
3278
3279 return err;
3280}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281EXPORT_SYMBOL(netif_rx_ni);
3282
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283static void net_tx_action(struct softirq_action *h)
3284{
3285 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3286
3287 if (sd->completion_queue) {
3288 struct sk_buff *clist;
3289
3290 local_irq_disable();
3291 clist = sd->completion_queue;
3292 sd->completion_queue = NULL;
3293 local_irq_enable();
3294
3295 while (clist) {
3296 struct sk_buff *skb = clist;
3297 clist = clist->next;
3298
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003299 WARN_ON(atomic_read(&skb->users));
Eric Dumazete6247022013-12-05 04:45:08 -08003300 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3301 trace_consume_skb(skb);
3302 else
3303 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003304 __kfree_skb(skb);
3305 }
3306 }
3307
3308 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003309 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310
3311 local_irq_disable();
3312 head = sd->output_queue;
3313 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003314 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315 local_irq_enable();
3316
3317 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003318 struct Qdisc *q = head;
3319 spinlock_t *root_lock;
3320
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321 head = head->next_sched;
3322
David S. Miller5fb66222008-08-02 20:02:43 -07003323 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003324 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003325 smp_mb__before_clear_bit();
3326 clear_bit(__QDISC_STATE_SCHED,
3327 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003328 qdisc_run(q);
3329 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003331 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003332 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003333 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003334 } else {
3335 smp_mb__before_clear_bit();
3336 clear_bit(__QDISC_STATE_SCHED,
3337 &q->state);
3338 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339 }
3340 }
3341 }
3342}
3343
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003344#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3345 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003346/* This hook is defined here for ATM LANE */
3347int (*br_fdb_test_addr_hook)(struct net_device *dev,
3348 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003349EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003350#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352#ifdef CONFIG_NET_CLS_ACT
3353/* TODO: Maybe we should just force sch_ingress to be compiled in
3354 * whenever CONFIG_NET_CLS_ACT is? Otherwise we currently pay some
3355 * useless instructions (an extra compare and two stores) when it is
3356 * not enabled but CONFIG_NET_CLS_ACT is.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003357 * NOTE: This doesn't stop any functionality; if you don't have
3358 * the ingress scheduler, you just can't add policies on ingress.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003359 *
3360 */
Eric Dumazet24824a02010-10-02 06:11:55 +00003361static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003364 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07003365 int result = TC_ACT_OK;
3366 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003367
Stephen Hemmingerde384832010-08-01 00:33:23 -07003368 if (unlikely(MAX_RED_LOOP < ttl++)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003369 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3370 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07003371 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003372 }
3373
Herbert Xuf697c3e2007-10-14 00:38:47 -07003374 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3375 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3376
David S. Miller83874002008-07-17 00:53:03 -07003377 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07003378 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07003379 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07003380 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3381 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07003382 spin_unlock(qdisc_lock(q));
3383 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07003384
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 return result;
3386}
Herbert Xuf697c3e2007-10-14 00:38:47 -07003387
3388static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3389 struct packet_type **pt_prev,
3390 int *ret, struct net_device *orig_dev)
3391{
Eric Dumazet24824a02010-10-02 06:11:55 +00003392 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3393
3394 if (!rxq || rxq->qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003395 goto out;
3396
3397 if (*pt_prev) {
3398 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3399 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003400 }
3401
Eric Dumazet24824a02010-10-02 06:11:55 +00003402 switch (ing_filter(skb, rxq)) {
Herbert Xuf697c3e2007-10-14 00:38:47 -07003403 case TC_ACT_SHOT:
3404 case TC_ACT_STOLEN:
3405 kfree_skb(skb);
3406 return NULL;
3407 }
3408
3409out:
3410 skb->tc_verd = 0;
3411 return skb;
3412}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413#endif
3414
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003415/**
3416 * netdev_rx_handler_register - register receive handler
3417 * @dev: device to register a handler for
3418 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003419 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003420 *
3421 * Register a receive handler for a device. This handler will then be
3422 * called from __netif_receive_skb. A negative errno code is returned
3423 * on a failure.
3424 *
3425 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003426 *
3427 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003428 */
3429int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003430 rx_handler_func_t *rx_handler,
3431 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003432{
3433 ASSERT_RTNL();
3434
3435 if (dev->rx_handler)
3436 return -EBUSY;
3437
Eric Dumazet00cfec32013-03-29 03:01:22 +00003438 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00003439 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003440 rcu_assign_pointer(dev->rx_handler, rx_handler);
3441
3442 return 0;
3443}
3444EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
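/*
 * Illustrative sketch (hypothetical port handler): a bridge-like user
 * registers a handler that either consumes the skb or lets normal
 * delivery continue. Registration must run under rtnl_lock().
 */
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) {
		kfree_skb(skb);		/* the handler owns the skb now */
		return RX_HANDLER_CONSUMED;
	}
	return RX_HANDLER_PASS;		/* normal delivery continues */
}

static int example_claim_port(struct net_device *port_dev, void *port_priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(port_dev, example_handle_frame,
					 port_priv);
	rtnl_unlock();
	return err;
}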
3445
3446/**
3447 * netdev_rx_handler_unregister - unregister receive handler
3448 * @dev: device to unregister a handler from
3449 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00003450 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003451 *
3452 * The caller must hold the rtnl_mutex.
3453 */
3454void netdev_rx_handler_unregister(struct net_device *dev)
3455{
3456
3457 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003458 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00003459	/* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
3460	 * section is guaranteed to see a non-NULL rx_handler_data
3461	 * as well.
3462 */
3463 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003464 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003465}
3466EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3467
Mel Gormanb4b9e352012-07-31 16:44:26 -07003468/*
3469 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3470 * the special handling of PFMEMALLOC skbs.
3471 */
3472static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3473{
3474 switch (skb->protocol) {
3475 case __constant_htons(ETH_P_ARP):
3476 case __constant_htons(ETH_P_IP):
3477 case __constant_htons(ETH_P_IPV6):
3478 case __constant_htons(ETH_P_8021Q):
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003479 case __constant_htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07003480 return true;
3481 default:
3482 return false;
3483 }
3484}
3485
David S. Miller9754e292013-02-14 15:57:38 -05003486static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487{
3488 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003489 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003490 struct net_device *orig_dev;
David S. Miller63d8ea72011-02-28 10:48:59 -08003491 struct net_device *null_or_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003492 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08003494 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495
Eric Dumazet588f0332011-11-15 04:12:55 +00003496 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003497
Koki Sanagicf66ba52010-08-23 18:45:02 +09003498 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003499
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003501 if (netpoll_receive_skb(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003502 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003504 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003505
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003506 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003507 if (!skb_transport_header_was_set(skb))
3508 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003509 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510
3511 pt_prev = NULL;
3512
3513 rcu_read_lock();
3514
David S. Miller63d8ea72011-02-28 10:48:59 -08003515another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003516 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003517
3518 __this_cpu_inc(softnet_data.processed);
3519
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003520 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3521 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003522 skb = vlan_untag(skb);
3523 if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003524 goto unlock;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003525 }
3526
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527#ifdef CONFIG_NET_CLS_ACT
3528 if (skb->tc_verd & TC_NCLS) {
3529 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3530 goto ncls;
3531 }
3532#endif
3533
David S. Miller9754e292013-02-14 15:57:38 -05003534 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003535 goto skip_taps;
3536
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537 list_for_each_entry_rcu(ptype, &ptype_all, list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003538 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003539 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003540 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003541 pt_prev = ptype;
3542 }
3543 }
3544
Mel Gormanb4b9e352012-07-31 16:44:26 -07003545skip_taps:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07003547 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3548 if (!skb)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003549 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550ncls:
3551#endif
3552
David S. Miller9754e292013-02-14 15:57:38 -05003553 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003554 goto drop;
3555
John Fastabend24257172011-10-10 09:16:41 +00003556 if (vlan_tx_tag_present(skb)) {
3557 if (pt_prev) {
3558 ret = deliver_skb(skb, pt_prev, orig_dev);
3559 pt_prev = NULL;
3560 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003561 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003562 goto another_round;
3563 else if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003564 goto unlock;
John Fastabend24257172011-10-10 09:16:41 +00003565 }
3566
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003567 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003568 if (rx_handler) {
3569 if (pt_prev) {
3570 ret = deliver_skb(skb, pt_prev, orig_dev);
3571 pt_prev = NULL;
3572 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003573 switch (rx_handler(&skb)) {
3574 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00003575 ret = NET_RX_SUCCESS;
Mel Gormanb4b9e352012-07-31 16:44:26 -07003576 goto unlock;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003577 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003578 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003579 case RX_HANDLER_EXACT:
3580 deliver_exact = true;
3581 case RX_HANDLER_PASS:
3582 break;
3583 default:
3584 BUG();
3585 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003586 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587
Eric Dumazetd4b812d2013-07-18 07:19:26 -07003588 if (unlikely(vlan_tx_tag_present(skb))) {
3589 if (vlan_tx_tag_get_id(skb))
3590 skb->pkt_type = PACKET_OTHERHOST;
3591 /* Note: we might in the future use prio bits
3592		 * and set skb->priority like in vlan_do_receive().
3593		 * For the time being, just ignore the Priority Code Point.
3594 */
3595 skb->vlan_tci = 0;
3596 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003597
David S. Miller63d8ea72011-02-28 10:48:59 -08003598 /* deliver only exact match when indicated */
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003599 null_or_dev = deliver_exact ? skb->dev : NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00003600
Linus Torvalds1da177e2005-04-16 15:20:36 -07003601 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003602 list_for_each_entry_rcu(ptype,
3603 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003604 if (ptype->type == type &&
Jiri Pirkoe3f48d32011-02-28 20:26:31 +00003605 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3606 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003607 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003608 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609 pt_prev = ptype;
3610 }
3611 }
3612
3613 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003614 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003615 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003616 else
3617 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003619drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003620 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003621 kfree_skb(skb);
3622		/* Jamal, now you will not be able to escape explaining
3623		 * to me how you were going to use this. :-)
3624 */
3625 ret = NET_RX_DROP;
3626 }
3627
Mel Gormanb4b9e352012-07-31 16:44:26 -07003628unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003629 rcu_read_unlock();
Mel Gormanb4b9e352012-07-31 16:44:26 -07003630out:
David S. Miller9754e292013-02-14 15:57:38 -05003631 return ret;
3632}
3633
3634static int __netif_receive_skb(struct sk_buff *skb)
3635{
3636 int ret;
3637
3638 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3639 unsigned long pflags = current->flags;
3640
3641 /*
3642 * PFMEMALLOC skbs are special, they should
3643 * - be delivered to SOCK_MEMALLOC sockets only
3644 * - stay away from userspace
3645 * - have bounded memory usage
3646 *
3647 * Use PF_MEMALLOC as this saves us from propagating the allocation
3648 * context down to all allocation sites.
3649 */
3650 current->flags |= PF_MEMALLOC;
3651 ret = __netif_receive_skb_core(skb, true);
3652 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3653 } else
3654 ret = __netif_receive_skb_core(skb, false);
3655
Linus Torvalds1da177e2005-04-16 15:20:36 -07003656 return ret;
3657}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003658
3659/**
3660 * netif_receive_skb - process receive buffer from network
3661 * @skb: buffer to process
3662 *
3663 * netif_receive_skb() is the main receive data processing function.
3664 * It always succeeds. The buffer may be dropped during processing
3665 * for congestion control or by the protocol layers.
3666 *
3667 * This function may only be called from softirq context and interrupts
3668 * should be enabled.
3669 *
3670 * Return values (usually ignored):
3671 * NET_RX_SUCCESS: no congestion
3672 * NET_RX_DROP: packet was dropped
3673 */
3674int netif_receive_skb(struct sk_buff *skb)
3675{
Eric Dumazet588f0332011-11-15 04:12:55 +00003676 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003677
Richard Cochranc1f19b52010-07-17 08:49:36 +00003678 if (skb_defer_rx_timestamp(skb))
3679 return NET_RX_SUCCESS;
3680
Eric Dumazetdf334542010-03-24 19:13:54 +00003681#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003682 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07003683 struct rps_dev_flow voidflow, *rflow = &voidflow;
3684 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003685
Eric Dumazet3b098e22010-05-15 23:57:10 -07003686 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003687
Eric Dumazet3b098e22010-05-15 23:57:10 -07003688 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07003689
Eric Dumazet3b098e22010-05-15 23:57:10 -07003690 if (cpu >= 0) {
3691 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3692 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00003693 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003694 }
Eric Dumazetadc93002011-11-17 03:13:26 +00003695 rcu_read_unlock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003696 }
Tom Herbert1e94d722010-03-18 17:45:44 -07003697#endif
Eric Dumazetadc93002011-11-17 03:13:26 +00003698 return __netif_receive_skb(skb);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003699}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003700EXPORT_SYMBOL(netif_receive_skb);
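/*
 * Illustrative sketch: a NAPI driver delivers frames from its poll
 * routine, where the softirq-context requirement documented above is
 * naturally satisfied.
 */
static void example_napi_deliver(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);		/* return value is usually ignored */
}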
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701
Eric Dumazet88751272010-04-19 05:07:33 +00003702/* Network device is going away, flush any packets still pending
3703 * Called with irqs disabled.
3704 */
Changli Gao152102c2010-03-30 20:16:22 +00003705static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003706{
Changli Gao152102c2010-03-30 20:16:22 +00003707 struct net_device *dev = arg;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003708 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003709 struct sk_buff *skb, *tmp;
3710
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003711 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003712 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003713 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003714 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003715 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003716 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003717 }
Changli Gao6e7676c2010-04-27 15:07:33 -07003718 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003719 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003720
3721 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3722 if (skb->dev == dev) {
3723 __skb_unlink(skb, &sd->process_queue);
3724 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003725 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003726 }
3727 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003728}
3729
Herbert Xud565b0a2008-12-15 23:38:52 -08003730static int napi_gro_complete(struct sk_buff *skb)
3731{
Vlad Yasevich22061d82012-11-15 08:49:11 +00003732 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003733 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003734 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08003735 int err = -ENOENT;
3736
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003737 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3738
Herbert Xufc59f9a2009-04-14 15:11:06 -07003739 if (NAPI_GRO_CB(skb)->count == 1) {
3740 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003741 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07003742 }
Herbert Xud565b0a2008-12-15 23:38:52 -08003743
3744 rcu_read_lock();
3745 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003746 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08003747 continue;
3748
Jerry Chu299603e82013-12-11 20:53:45 -08003749 err = ptype->callbacks.gro_complete(skb, 0);
Herbert Xud565b0a2008-12-15 23:38:52 -08003750 break;
3751 }
3752 rcu_read_unlock();
3753
3754 if (err) {
3755 WARN_ON(&ptype->list == head);
3756 kfree_skb(skb);
3757 return NET_RX_SUCCESS;
3758 }
3759
3760out:
Herbert Xud565b0a2008-12-15 23:38:52 -08003761 return netif_receive_skb(skb);
3762}
3763
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003764/* napi->gro_list contains packets ordered by age, with the
3765 * youngest packets at its head.
3766 * Complete skbs in reverse order to reduce latencies.
3767 */
3768void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08003769{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003770 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003771
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003772 /* scan list and build reverse chain */
3773 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3774 skb->prev = prev;
3775 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08003776 }
3777
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003778 for (skb = prev; skb; skb = prev) {
3779 skb->next = NULL;
3780
3781 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3782 return;
3783
3784 prev = skb->prev;
3785 napi_gro_complete(skb);
3786 napi->gro_count--;
3787 }
3788
Herbert Xud565b0a2008-12-15 23:38:52 -08003789 napi->gro_list = NULL;
3790}
Eric Dumazet86cac582010-08-31 18:25:32 +00003791EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08003792
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003793static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3794{
3795 struct sk_buff *p;
3796 unsigned int maclen = skb->dev->hard_header_len;
3797
3798 for (p = napi->gro_list; p; p = p->next) {
3799 unsigned long diffs;
3800
3801 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3802 diffs |= p->vlan_tci ^ skb->vlan_tci;
3803 if (maclen == ETH_HLEN)
3804 diffs |= compare_ether_header(skb_mac_header(p),
3805 skb_gro_mac_header(skb));
3806 else if (!diffs)
3807 diffs = memcmp(skb_mac_header(p),
3808 skb_gro_mac_header(skb),
3809 maclen);
3810 NAPI_GRO_CB(p)->same_flow = !diffs;
3811 NAPI_GRO_CB(p)->flush = 0;
3812 }
3813}
3814
Jerry Chu299603e82013-12-11 20:53:45 -08003815static void skb_gro_reset_offset(struct sk_buff *skb)
3816{
3817 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3818 const skb_frag_t *frag0 = &pinfo->frags[0];
3819
3820 NAPI_GRO_CB(skb)->data_offset = 0;
3821 NAPI_GRO_CB(skb)->frag0 = NULL;
3822 NAPI_GRO_CB(skb)->frag0_len = 0;
3823
3824 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
3825 pinfo->nr_frags &&
3826 !PageHighMem(skb_frag_page(frag0))) {
3827 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3828 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
3829 }
3830}
3831
Rami Rosenbb728822012-11-28 21:55:25 +00003832static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08003833{
3834 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003835 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003836 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003837 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003838 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003839 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003840
Jarek Poplawskice9e76c2010-08-05 01:19:11 +00003841 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
Herbert Xud565b0a2008-12-15 23:38:52 -08003842 goto normal;
3843
David S. Miller21dc3302010-08-23 00:13:46 -07003844 if (skb_is_gso(skb) || skb_has_frag_list(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08003845 goto normal;
3846
Jerry Chu299603e82013-12-11 20:53:45 -08003847 skb_gro_reset_offset(skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003848 gro_list_prepare(napi, skb);
Jerry Chubf5a7552014-01-07 10:23:19 -08003849 NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003850
Herbert Xud565b0a2008-12-15 23:38:52 -08003851 rcu_read_lock();
3852 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003853 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08003854 continue;
3855
Herbert Xu86911732009-01-29 14:19:50 +00003856 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00003857 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003858 NAPI_GRO_CB(skb)->same_flow = 0;
3859 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08003860 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003861
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003862 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003863 break;
3864 }
3865 rcu_read_unlock();
3866
3867 if (&ptype->list == head)
3868 goto normal;
3869
Herbert Xu0da2afd52008-12-26 14:57:42 -08003870 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003871 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003872
Herbert Xud565b0a2008-12-15 23:38:52 -08003873 if (pp) {
3874 struct sk_buff *nskb = *pp;
3875
3876 *pp = nskb->next;
3877 nskb->next = NULL;
3878 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00003879 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08003880 }
3881
Herbert Xu0da2afd52008-12-26 14:57:42 -08003882 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08003883 goto ok;
3884
Herbert Xu4ae55442009-02-08 18:00:36 +00003885 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08003886 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08003887
Herbert Xu4ae55442009-02-08 18:00:36 +00003888 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08003889 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003890 NAPI_GRO_CB(skb)->age = jiffies;
Herbert Xu86911732009-01-29 14:19:50 +00003891 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003892 skb->next = napi->gro_list;
3893 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003894 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08003895
Herbert Xuad0f9902009-02-01 01:24:55 -08003896pull:
Herbert Xucb189782009-05-26 18:50:31 +00003897 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3898 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3899
3900 BUG_ON(skb->end - skb->tail < grow);
3901
3902 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3903
3904 skb->tail += grow;
3905 skb->data_len -= grow;
3906
3907 skb_shinfo(skb)->frags[0].page_offset += grow;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003908 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
Herbert Xucb189782009-05-26 18:50:31 +00003909
Eric Dumazet9e903e02011-10-18 21:00:24 +00003910 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
Ian Campbellea2ab692011-08-22 23:44:58 +00003911 skb_frag_unref(skb, 0);
Herbert Xucb189782009-05-26 18:50:31 +00003912 memmove(skb_shinfo(skb)->frags,
3913 skb_shinfo(skb)->frags + 1,
Jarek Poplawskie5093ae2010-08-11 02:02:10 +00003914 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
Herbert Xucb189782009-05-26 18:50:31 +00003915 }
Herbert Xuad0f9902009-02-01 01:24:55 -08003916 }
3917
Herbert Xud565b0a2008-12-15 23:38:52 -08003918ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003919 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003920
3921normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08003922 ret = GRO_NORMAL;
3923 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08003924}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003925
Jerry Chubf5a7552014-01-07 10:23:19 -08003926struct packet_offload *gro_find_receive_by_type(__be16 type)
3927{
3928 struct list_head *offload_head = &offload_base;
3929 struct packet_offload *ptype;
3930
3931 list_for_each_entry_rcu(ptype, offload_head, list) {
3932 if (ptype->type != type || !ptype->callbacks.gro_receive)
3933 continue;
3934 return ptype;
3935 }
3936 return NULL;
3937}
3938
3939struct packet_offload *gro_find_complete_by_type(__be16 type)
3940{
3941 struct list_head *offload_head = &offload_base;
3942 struct packet_offload *ptype;
3943
3944 list_for_each_entry_rcu(ptype, offload_head, list) {
3945 if (ptype->type != type || !ptype->callbacks.gro_complete)
3946 continue;
3947 return ptype;
3948 }
3949 return NULL;
3950}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003951
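/*
 * Editorial sketch (hedged): the two lookup helpers above let an
 * encapsulation offload chain into the inner protocol's GRO handlers.
 * MY_TUNNEL_HLEN and my_tunnel_gro_receive() are hypothetical stand-ins
 * for a real tunnel protocol; flush/error handling is trimmed.
 */
#define MY_TUNNEL_HLEN 8	/* hypothetical fixed tunnel header size */

static struct sk_buff **my_tunnel_gro_receive(struct sk_buff **head,
					      struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = htons(ETH_P_IP);	/* assumed inner protocol */

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	skb_gro_pull(skb, MY_TUNNEL_HLEN);	/* skip the outer header */
	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
	return pp;
}
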
Rami Rosenbb728822012-11-28 21:55:25 +00003952static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003953{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003954 switch (ret) {
3955 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003956 if (netif_receive_skb(skb))
3957 ret = GRO_DROP;
3958 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003959
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003960 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08003961 kfree_skb(skb);
3962 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003963
Eric Dumazetdaa86542012-04-19 07:07:40 +00003964 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00003965 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3966 kmem_cache_free(skbuff_head_cache, skb);
3967 else
3968 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00003969 break;
3970
Ben Hutchings5b252f02009-10-29 07:17:09 +00003971 case GRO_HELD:
3972 case GRO_MERGED:
3973 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003974 }
3975
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003976 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003977}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003978
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003979gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003980{
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003981 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003982}
3983EXPORT_SYMBOL(napi_gro_receive);
3984
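/*
 * Editorial sketch (hedged): a minimal driver poll routine feeding
 * frames into GRO via napi_gro_receive().  my_hw_fetch_skb() is a
 * hypothetical stand-in for descriptor-ring processing, and interrupt
 * re-enabling after napi_complete() is device specific.
 */
static struct sk_buff *my_hw_fetch_skb(struct net_device *dev);	/* hypothetical */

static int my_driver_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct sk_buff *skb;

	while (work_done < budget && (skb = my_hw_fetch_skb(napi->dev))) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);	/* merge into gro_list or deliver */
		work_done++;
	}

	if (work_done < budget)
		napi_complete(napi);	/* flushes gro_list, clears NAPI_STATE_SCHED */

	return work_done;
}
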
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00003985static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003986{
Herbert Xu96e93ea2009-01-06 10:49:34 -08003987 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00003988 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3989 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00003990 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08003991 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08003992 skb->skb_iif = 0;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003993
3994 napi->skb = skb;
3995}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003996
Herbert Xu76620aa2009-04-16 02:02:07 -07003997struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08003998{
Herbert Xu5d38a072009-01-04 16:13:40 -08003999 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004000
4001 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00004002 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
Eric Dumazet84b9cd62013-12-05 21:44:27 -08004003 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004004 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004005 return skb;
4006}
Herbert Xu76620aa2009-04-16 02:02:07 -07004007EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004008
Rami Rosenbb728822012-11-28 21:55:25 +00004009static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004010 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004011{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004012 switch (ret) {
4013 case GRO_NORMAL:
Jerry Chu299603e82013-12-11 20:53:45 -08004014 if (netif_receive_skb(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004015 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00004016 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004017
4018 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004019 case GRO_MERGED_FREE:
4020 napi_reuse_skb(napi, skb);
4021 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004022
Jerry Chu299603e82013-12-11 20:53:45 -08004023 case GRO_HELD:
Ben Hutchings5b252f02009-10-29 07:17:09 +00004024 case GRO_MERGED:
4025 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004026 }
4027
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004028 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004029}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004030
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004031static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004032{
Herbert Xu76620aa2009-04-16 02:02:07 -07004033 struct sk_buff *skb = napi->skb;
Herbert Xu76620aa2009-04-16 02:02:07 -07004034
4035 napi->skb = NULL;
4036
Jerry Chu299603e82013-12-11 20:53:45 -08004037 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
4038 napi_reuse_skb(napi, skb);
4039 return NULL;
Herbert Xu76620aa2009-04-16 02:02:07 -07004040 }
Jerry Chu299603e82013-12-11 20:53:45 -08004041 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu76620aa2009-04-16 02:02:07 -07004042
Herbert Xu76620aa2009-04-16 02:02:07 -07004043 return skb;
4044}
Herbert Xu76620aa2009-04-16 02:02:07 -07004045
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004046gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004047{
4048 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004049
4050 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004051 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004052
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004053 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004054}
4055EXPORT_SYMBOL(napi_gro_frags);
4056
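/*
 * Editorial sketch (hedged): the napi_get_frags()/napi_gro_frags() pair
 * used by page-based receive paths.  my_hw_rx_page()/my_hw_rx_len() are
 * hypothetical; a real driver fills these from its RX descriptor.
 */
static struct page *my_hw_rx_page(void);	/* hypothetical */
static unsigned int my_hw_rx_len(void);		/* hypothetical */

static void my_driver_rx_one_frag(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_get_frags(napi);
	unsigned int len = my_hw_rx_len();

	if (!skb)
		return;		/* allocation failed; napi->skb stays NULL */

	skb_fill_page_desc(skb, 0, my_hw_rx_page(), 0, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi);	/* napi_frags_skb() pulls the Ethernet header */
}
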
Eric Dumazete326bed2010-04-22 00:22:45 -07004057/*
Zhi Yong Wu855abcf2014-01-01 04:34:50 +08004058 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
Eric Dumazete326bed2010-04-22 00:22:45 -07004059 * Note: called with local irq disabled, but exits with local irq enabled.
4060 */
4061static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4062{
4063#ifdef CONFIG_RPS
4064 struct softnet_data *remsd = sd->rps_ipi_list;
4065
4066 if (remsd) {
4067 sd->rps_ipi_list = NULL;
4068
4069 local_irq_enable();
4070
4071 /* Send pending IPIs to kick RPS processing on remote CPUs. */
4072 while (remsd) {
4073 struct softnet_data *next = remsd->rps_ipi_next;
4074
4075 if (cpu_online(remsd->cpu))
4076 __smp_call_function_single(remsd->cpu,
4077 &remsd->csd, 0);
4078 remsd = next;
4079 }
4080 } else
4081#endif
4082 local_irq_enable();
4083}
4084
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004085static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004086{
4087 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004088 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089
Eric Dumazete326bed2010-04-22 00:22:45 -07004090#ifdef CONFIG_RPS
4091 /* Check if we have pending IPIs; it's better to send them now
4092 * than to wait for net_rx_action() to end.
4093 */
4094 if (sd->rps_ipi_list) {
4095 local_irq_disable();
4096 net_rps_action_and_irq_enable(sd);
4097 }
4098#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004099 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004100 local_irq_disable();
4101 while (work < quota) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102 struct sk_buff *skb;
Changli Gao6e7676c2010-04-27 15:07:33 -07004103 unsigned int qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104
Changli Gao6e7676c2010-04-27 15:07:33 -07004105 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004106 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004107 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004108 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004109 input_queue_head_incr(sd);
4110 if (++work >= quota) {
4111 local_irq_enable();
4112 return work;
4113 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004114 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004115
Changli Gao6e7676c2010-04-27 15:07:33 -07004116 rps_lock(sd);
4117 qlen = skb_queue_len(&sd->input_pkt_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004118 if (qlen)
Changli Gao6e7676c2010-04-27 15:07:33 -07004119 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4120 &sd->process_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004121
Changli Gao6e7676c2010-04-27 15:07:33 -07004122 if (qlen < quota - work) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004123 /*
4124 * Inline a custom version of __napi_complete().
4125 * Only the current cpu owns and manipulates this napi,
4126 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
4127 * We can use a plain write instead of clear_bit(),
4128 * and we don't need an smp_mb() memory barrier.
4129 */
4130 list_del(&napi->poll_list);
4131 napi->state = 0;
4132
Changli Gao6e7676c2010-04-27 15:07:33 -07004133 quota = work + qlen;
4134 }
4135 rps_unlock(sd);
4136 }
4137 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004138
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004139 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140}
4141
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004142/**
4143 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004144 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004145 *
4146 * The entry's receive function will be scheduled to run
4147 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004148void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004149{
4150 unsigned long flags;
4151
4152 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004153 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004154 local_irq_restore(flags);
4155}
4156EXPORT_SYMBOL(__napi_schedule);
4157
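/*
 * Editorial sketch (hedged): the canonical interrupt-to-NAPI hand-off.
 * struct my_priv and the hardware masking step are hypothetical; a real
 * driver masks its RX interrupt before scheduling the poll.
 */
struct my_priv {
	struct napi_struct napi;	/* hypothetical driver state */
};

static irqreturn_t my_driver_isr(int irq, void *data)
{
	struct my_priv *priv = data;

	/* mask further RX interrupts in hardware here (device specific) */
	napi_schedule(&priv->napi);	/* checks NAPI_STATE_SCHED, then __napi_schedule() */
	return IRQ_HANDLED;
}
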
Herbert Xud565b0a2008-12-15 23:38:52 -08004158void __napi_complete(struct napi_struct *n)
4159{
4160 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4161 BUG_ON(n->gro_list);
4162
4163 list_del(&n->poll_list);
4164 smp_mb__before_clear_bit();
4165 clear_bit(NAPI_STATE_SCHED, &n->state);
4166}
4167EXPORT_SYMBOL(__napi_complete);
4168
4169void napi_complete(struct napi_struct *n)
4170{
4171 unsigned long flags;
4172
4173 /*
4174 * don't let napi dequeue from the cpu poll list
4175 * just in case it's running on a different cpu
4176 */
4177 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4178 return;
4179
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004180 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004181 local_irq_save(flags);
4182 __napi_complete(n);
4183 local_irq_restore(flags);
4184}
4185EXPORT_SYMBOL(napi_complete);
4186
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004187/* must be called under rcu_read_lock(), as we don't take a reference */
4188struct napi_struct *napi_by_id(unsigned int napi_id)
4189{
4190 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4191 struct napi_struct *napi;
4192
4193 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4194 if (napi->napi_id == napi_id)
4195 return napi;
4196
4197 return NULL;
4198}
4199EXPORT_SYMBOL_GPL(napi_by_id);
4200
4201void napi_hash_add(struct napi_struct *napi)
4202{
4203 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4204
4205 spin_lock(&napi_hash_lock);
4206
4207 /* 0 is not a valid id; we also skip an id that is taken.
4208 * We expect both events to be extremely rare.
4209 */
4210 napi->napi_id = 0;
4211 while (!napi->napi_id) {
4212 napi->napi_id = ++napi_gen_id;
4213 if (napi_by_id(napi->napi_id))
4214 napi->napi_id = 0;
4215 }
4216
4217 hlist_add_head_rcu(&napi->napi_hash_node,
4218 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4219
4220 spin_unlock(&napi_hash_lock);
4221 }
4222}
4223EXPORT_SYMBOL_GPL(napi_hash_add);
4224
4225/* Warning: the caller is responsible for making sure an rcu grace period
4226 * is respected before freeing memory containing @napi
4227 */
4228void napi_hash_del(struct napi_struct *napi)
4229{
4230 spin_lock(&napi_hash_lock);
4231
4232 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4233 hlist_del_rcu(&napi->napi_hash_node);
4234
4235 spin_unlock(&napi_hash_lock);
4236}
4237EXPORT_SYMBOL_GPL(napi_hash_del);
4238
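/*
 * Editorial sketch (hedged): the napi_hash life cycle as seen from a
 * driver and from a busy-poll style lookup.  Teardown must let an RCU
 * grace period elapse before the napi_struct's memory is reused.
 */
static void my_setup_busy_poll(struct napi_struct *napi)
{
	napi_hash_add(napi);		/* assigns a globally unique napi_id */
}

static void my_lookup_busy_poll(unsigned int napi_id)
{
	struct napi_struct *napi;

	rcu_read_lock();
	napi = napi_by_id(napi_id);	/* no reference taken; RCU protects it */
	if (napi) {
		/* poll napi->dev here, device specific */
	}
	rcu_read_unlock();
}

static void my_teardown_busy_poll(struct napi_struct *napi)
{
	napi_hash_del(napi);
	synchronize_net();		/* wait out readers before freeing */
}
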
Herbert Xud565b0a2008-12-15 23:38:52 -08004239void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4240 int (*poll)(struct napi_struct *, int), int weight)
4241{
4242 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004243 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004244 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004245 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004246 napi->poll = poll;
Eric Dumazet82dc3c62013-03-05 15:57:22 +00004247 if (weight > NAPI_POLL_WEIGHT)
4248 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4249 weight, dev->name);
Herbert Xud565b0a2008-12-15 23:38:52 -08004250 napi->weight = weight;
4251 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004252 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004253#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004254 spin_lock_init(&napi->poll_lock);
4255 napi->poll_owner = -1;
4256#endif
4257 set_bit(NAPI_STATE_SCHED, &napi->state);
4258}
4259EXPORT_SYMBOL(netif_napi_add);
4260
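/*
 * Editorial sketch (hedged): registering the context above from a
 * driver.  In practice netif_napi_add() usually runs at probe time and
 * napi_enable() at ndo_open; both are compressed into one hypothetical
 * helper here, reusing my_priv/my_driver_poll from the sketches above.
 */
static int my_setup_napi(struct net_device *netdev, struct my_priv *priv)
{
	netif_napi_add(netdev, &priv->napi, my_driver_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);	/* clears the NAPI_STATE_SCHED bit set above */
	return 0;
}
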
4261void netif_napi_del(struct napi_struct *napi)
4262{
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004263 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004264 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004265
Eric Dumazet289dccb2013-12-20 14:29:08 -08004266 kfree_skb_list(napi->gro_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004267 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004268 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004269}
4270EXPORT_SYMBOL(netif_napi_del);
4271
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272static void net_rx_action(struct softirq_action *h)
4273{
Eric Dumazete326bed2010-04-22 00:22:45 -07004274 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004275 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004276 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004277 void *have;
4278
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279 local_irq_disable();
4280
Eric Dumazete326bed2010-04-22 00:22:45 -07004281 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004282 struct napi_struct *n;
4283 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004285 /* If softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004286 * Allow this to run for 2 jiffies, which allows
4287 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004288 */
Eric Dumazetd1f41b62013-03-05 07:15:13 +00004289 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290 goto softnet_break;
4291
4292 local_irq_enable();
4293
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004294 /* Even though interrupts have been re-enabled, this
4295 * access is safe because interrupts can only add new
4296 * entries to the tail of this list, and only ->poll()
4297 * calls can remove this head entry from the list.
4298 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004299 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004301 have = netpoll_poll_lock(n);
4302
4303 weight = n->weight;
4304
David S. Miller0a7606c2007-10-29 21:28:47 -07004305 /* This NAPI_STATE_SCHED test is for avoiding a race
4306 * with netpoll's poll_napi(). Only the entity which
4307 * obtains the lock and sees NAPI_STATE_SCHED set will
4308 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004309 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004310 */
4311 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004312 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004313 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004314 trace_napi_poll(n);
4315 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004316
4317 WARN_ON_ONCE(work > weight);
4318
4319 budget -= work;
4320
4321 local_irq_disable();
4322
4323 /* Drivers must not modify the NAPI state if they
4324 * consume the entire weight. In such cases this code
4325 * still "owns" the NAPI instance and therefore can
4326 * move the instance around on the list at-will.
4327 */
David S. Millerfed17f32008-01-07 21:00:40 -08004328 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004329 if (unlikely(napi_disable_pending(n))) {
4330 local_irq_enable();
4331 napi_complete(n);
4332 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004333 } else {
4334 if (n->gro_list) {
4335 /* flush too-old packets;
4336 * if HZ < 1000, flush all packets.
4337 */
4338 local_irq_enable();
4339 napi_gro_flush(n, HZ >= 1000);
4340 local_irq_disable();
4341 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004342 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004343 }
David S. Millerfed17f32008-01-07 21:00:40 -08004344 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004345
4346 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347 }
4348out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004349 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004350
Chris Leechdb217332006-06-17 21:24:58 -07004351#ifdef CONFIG_NET_DMA
4352 /*
4353 * There may not be any more sk_buffs coming right now, so push
4354 * any pending DMA copies to hardware
4355 */
Dan Williams2ba05622009-01-06 11:38:14 -07004356 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004357#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004358
Linus Torvalds1da177e2005-04-16 15:20:36 -07004359 return;
4360
4361softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004362 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4364 goto out;
4365}
4366
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004367struct netdev_adjacent {
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004368 struct net_device *dev;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004369
4370 /* upper master flag; there can only be one master device per list */
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004371 bool master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004372
Veaceslav Falico5d261912013-08-28 23:25:05 +02004373 /* counter for the number of times this device was added to us */
4374 u16 ref_nr;
4375
Veaceslav Falico402dae92013-09-25 09:20:09 +02004376 /* private field for the users */
4377 void *private;
4378
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004379 struct list_head list;
4380 struct rcu_head rcu;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004381};
4382
Veaceslav Falico5d261912013-08-28 23:25:05 +02004383static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4384 struct net_device *adj_dev,
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004385 struct list_head *adj_list)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004386{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004387 struct netdev_adjacent *adj;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004388
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004389 list_for_each_entry(adj, adj_list, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004390 if (adj->dev == adj_dev)
4391 return adj;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004392 }
4393 return NULL;
4394}
4395
4396/**
4397 * netdev_has_upper_dev - Check if device is linked to an upper device
4398 * @dev: device
4399 * @upper_dev: upper device to check
4400 *
4401 * Find out if a device is linked to the specified upper device and return true
4402 * in case it is. Note that this checks only the immediate upper device,
4403 * not through a complete stack of devices. The caller must hold the RTNL lock.
4404 */
4405bool netdev_has_upper_dev(struct net_device *dev,
4406 struct net_device *upper_dev)
4407{
4408 ASSERT_RTNL();
4409
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004410 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004411}
4412EXPORT_SYMBOL(netdev_has_upper_dev);
4413
4414/**
4415 * netdev_has_any_upper_dev - Check if device is linked to some device
4416 * @dev: device
4417 *
4418 * Find out if a device is linked to an upper device and return true in case
4419 * it is. The caller must hold the RTNL lock.
4420 */
stephen hemminger1d143d92013-12-29 14:01:29 -08004421static bool netdev_has_any_upper_dev(struct net_device *dev)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004422{
4423 ASSERT_RTNL();
4424
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004425 return !list_empty(&dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004426}
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004427
4428/**
4429 * netdev_master_upper_dev_get - Get master upper device
4430 * @dev: device
4431 *
4432 * Find a master upper device and return pointer to it or NULL in case
4433 * it's not there. The caller must hold the RTNL lock.
4434 */
4435struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4436{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004437 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004438
4439 ASSERT_RTNL();
4440
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004441 if (list_empty(&dev->adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004442 return NULL;
4443
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004444 upper = list_first_entry(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004445 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004446 if (likely(upper->master))
4447 return upper->dev;
4448 return NULL;
4449}
4450EXPORT_SYMBOL(netdev_master_upper_dev_get);
4451
Veaceslav Falicob6ccba42013-09-25 09:20:23 +02004452void *netdev_adjacent_get_private(struct list_head *adj_list)
4453{
4454 struct netdev_adjacent *adj;
4455
4456 adj = list_entry(adj_list, struct netdev_adjacent, list);
4457
4458 return adj->private;
4459}
4460EXPORT_SYMBOL(netdev_adjacent_get_private);
4461
Veaceslav Falico31088a12013-09-25 09:20:12 +02004462/**
4463 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
Veaceslav Falico48311f42013-08-28 23:25:07 +02004464 * @dev: device
4465 * @iter: list_head ** of the current position
4466 *
4467 * Gets the next device from the dev's upper list, starting from iter
4468 * position. The caller must hold RCU read lock.
4469 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004470struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4471 struct list_head **iter)
Veaceslav Falico48311f42013-08-28 23:25:07 +02004472{
4473 struct netdev_adjacent *upper;
4474
John Fastabend85328242013-11-26 06:33:52 +00004475 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
Veaceslav Falico48311f42013-08-28 23:25:07 +02004476
4477 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4478
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004479 if (&upper->list == &dev->all_adj_list.upper)
Veaceslav Falico48311f42013-08-28 23:25:07 +02004480 return NULL;
4481
4482 *iter = &upper->list;
4483
4484 return upper->dev;
4485}
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004486EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
Veaceslav Falico48311f42013-08-28 23:25:07 +02004487
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004488/**
Veaceslav Falico31088a12013-09-25 09:20:12 +02004489 * netdev_lower_get_next_private - Get the next ->private from the
4490 * lower neighbour list
4491 * @dev: device
4492 * @iter: list_head ** of the current position
4493 *
4494 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4495 * list, starting from iter position. The caller must either hold the
4496 * RTNL lock or its own locking that guarantees that the neighbour lower
4497 * list will remain unchanged.
4498 */
4499void *netdev_lower_get_next_private(struct net_device *dev,
4500 struct list_head **iter)
4501{
4502 struct netdev_adjacent *lower;
4503
4504 lower = list_entry(*iter, struct netdev_adjacent, list);
4505
4506 if (&lower->list == &dev->adj_list.lower)
4507 return NULL;
4508
4509 if (iter)
4510 *iter = lower->list.next;
4511
4512 return lower->private;
4513}
4514EXPORT_SYMBOL(netdev_lower_get_next_private);
4515
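/*
 * Editorial sketch (hedged): walking every lower ->private with the
 * iterator above.  The initialisation of @iter mirrors how a
 * for-each style wrapper would be expected to drive it;
 * my_handle_private() is hypothetical.  Caller holds RTNL.
 */
static void my_handle_private(void *priv);	/* hypothetical */

static void my_walk_lower_privates(struct net_device *dev)
{
	struct list_head *iter;
	void *priv;

	ASSERT_RTNL();
	for (iter = dev->adj_list.lower.next,
	     priv = netdev_lower_get_next_private(dev, &iter);
	     priv;
	     priv = netdev_lower_get_next_private(dev, &iter))
		my_handle_private(priv);
}
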
4516/**
4517 * netdev_lower_get_next_private_rcu - Get the next ->private from the
4518 * lower neighbour list, RCU
4519 * variant
4520 * @dev: device
4521 * @iter: list_head ** of the current position
4522 *
4523 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4524 * list, starting from iter position. The caller must hold RCU read lock.
4525 */
4526void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4527 struct list_head **iter)
4528{
4529 struct netdev_adjacent *lower;
4530
4531 WARN_ON_ONCE(!rcu_read_lock_held());
4532
4533 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4534
4535 if (&lower->list == &dev->adj_list.lower)
4536 return NULL;
4537
4538 if (iter)
4539 *iter = &lower->list;
4540
4541 return lower->private;
4542}
4543EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4544
4545/**
dingtianhonge001bfa2013-12-13 10:19:55 +08004546 * netdev_lower_get_first_private_rcu - Get the first ->private from the
4547 * lower neighbour list, RCU
4548 * variant
4549 * @dev: device
4550 *
4551 * Gets the first netdev_adjacent->private from the dev's lower neighbour
4552 * list. The caller must hold RCU read lock.
4553 */
4554void *netdev_lower_get_first_private_rcu(struct net_device *dev)
4555{
4556 struct netdev_adjacent *lower;
4557
4558 lower = list_first_or_null_rcu(&dev->adj_list.lower,
4559 struct netdev_adjacent, list);
4560 if (lower)
4561 return lower->private;
4562 return NULL;
4563}
4564EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
4565
4566/**
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004567 * netdev_master_upper_dev_get_rcu - Get master upper device
4568 * @dev: device
4569 *
4570 * Find a master upper device and return pointer to it or NULL in case
4571 * it's not there. The caller must hold the RCU read lock.
4572 */
4573struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4574{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004575 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004576
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004577 upper = list_first_or_null_rcu(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004578 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004579 if (upper && likely(upper->master))
4580 return upper->dev;
4581 return NULL;
4582}
4583EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4584
Veaceslav Falico5d261912013-08-28 23:25:05 +02004585static int __netdev_adjacent_dev_insert(struct net_device *dev,
4586 struct net_device *adj_dev,
Veaceslav Falico7863c052013-09-25 09:20:06 +02004587 struct list_head *dev_list,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004588 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004589{
4590 struct netdev_adjacent *adj;
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004591 char linkname[IFNAMSIZ+7];
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004592 int ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004593
Veaceslav Falico7863c052013-09-25 09:20:06 +02004594 adj = __netdev_find_adj(dev, adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004595
4596 if (adj) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004597 adj->ref_nr++;
4598 return 0;
4599 }
4600
4601 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
4602 if (!adj)
4603 return -ENOMEM;
4604
4605 adj->dev = adj_dev;
4606 adj->master = master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004607 adj->ref_nr = 1;
Veaceslav Falico402dae92013-09-25 09:20:09 +02004608 adj->private = private;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004609 dev_hold(adj_dev);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004610
4611 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
4612 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004613
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004614 if (dev_list == &dev->adj_list.lower) {
4615 sprintf(linkname, "lower_%s", adj_dev->name);
4616 ret = sysfs_create_link(&(dev->dev.kobj),
4617 &(adj_dev->dev.kobj), linkname);
4618 if (ret)
4619 goto free_adj;
4620 } else if (dev_list == &dev->adj_list.upper) {
4621 sprintf(linkname, "upper_%s", adj_dev->name);
4622 ret = sysfs_create_link(&(dev->dev.kobj),
4623 &(adj_dev->dev.kobj), linkname);
4624 if (ret)
4625 goto free_adj;
4626 }
4627
Veaceslav Falico7863c052013-09-25 09:20:06 +02004628 /* Ensure that master link is always the first item in list. */
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004629 if (master) {
4630 ret = sysfs_create_link(&(dev->dev.kobj),
4631 &(adj_dev->dev.kobj), "master");
4632 if (ret)
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004633 goto remove_symlinks;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004634
Veaceslav Falico7863c052013-09-25 09:20:06 +02004635 list_add_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004636 } else {
Veaceslav Falico7863c052013-09-25 09:20:06 +02004637 list_add_tail_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004638 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02004639
4640 return 0;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004641
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004642remove_symlinks:
4643 if (dev_list == &dev->adj_list.lower) {
4644 sprintf(linkname, "lower_%s", adj_dev->name);
4645 sysfs_remove_link(&(dev->dev.kobj), linkname);
4646 } else if (dev_list == &dev->adj_list.upper) {
4647 sprintf(linkname, "upper_%s", adj_dev->name);
4648 sysfs_remove_link(&(dev->dev.kobj), linkname);
4649 }
4650
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004651free_adj:
4652 kfree(adj);
Nikolay Aleksandrov974daef2013-10-23 15:28:56 +02004653 dev_put(adj_dev);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004654
4655 return ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004656}
4657
stephen hemminger1d143d92013-12-29 14:01:29 -08004658static void __netdev_adjacent_dev_remove(struct net_device *dev,
4659 struct net_device *adj_dev,
4660 struct list_head *dev_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004661{
4662 struct netdev_adjacent *adj;
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004663 char linkname[IFNAMSIZ+7];
Veaceslav Falico5d261912013-08-28 23:25:05 +02004664
Veaceslav Falico7863c052013-09-25 09:20:06 +02004665 adj = __netdev_find_adj(dev, adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004666
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004667 if (!adj) {
4668 pr_err("tried to remove device %s from %s\n",
4669 dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004670 BUG();
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004671 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02004672
4673 if (adj->ref_nr > 1) {
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004674 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
4675 adj->ref_nr-1);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004676 adj->ref_nr--;
4677 return;
4678 }
4679
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004680 if (adj->master)
4681 sysfs_remove_link(&(dev->dev.kobj), "master");
4682
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004683 if (dev_list == &dev->adj_list.lower) {
4684 sprintf(linkname, "lower_%s", adj_dev->name);
4685 sysfs_remove_link(&(dev->dev.kobj), linkname);
4686 } else if (dev_list == &dev->adj_list.upper) {
4687 sprintf(linkname, "upper_%s", adj_dev->name);
4688 sysfs_remove_link(&(dev->dev.kobj), linkname);
4689 }
4690
Veaceslav Falico5d261912013-08-28 23:25:05 +02004691 list_del_rcu(&adj->list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004692 pr_debug("dev_put for %s, because link removed from %s to %s\n",
4693 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004694 dev_put(adj_dev);
4695 kfree_rcu(adj, rcu);
4696}
4697
stephen hemminger1d143d92013-12-29 14:01:29 -08004698static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
4699 struct net_device *upper_dev,
4700 struct list_head *up_list,
4701 struct list_head *down_list,
4702 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004703{
4704 int ret;
4705
Veaceslav Falico402dae92013-09-25 09:20:09 +02004706 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
4707 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004708 if (ret)
4709 return ret;
4710
Veaceslav Falico402dae92013-09-25 09:20:09 +02004711 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
4712 false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004713 if (ret) {
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004714 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004715 return ret;
4716 }
4717
4718 return 0;
4719}
4720
stephen hemminger1d143d92013-12-29 14:01:29 -08004721static int __netdev_adjacent_dev_link(struct net_device *dev,
4722 struct net_device *upper_dev)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004723{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004724 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
4725 &dev->all_adj_list.upper,
4726 &upper_dev->all_adj_list.lower,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004727 NULL, false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004728}
4729
stephen hemminger1d143d92013-12-29 14:01:29 -08004730static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
4731 struct net_device *upper_dev,
4732 struct list_head *up_list,
4733 struct list_head *down_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004734{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004735 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
4736 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004737}
4738
stephen hemminger1d143d92013-12-29 14:01:29 -08004739static void __netdev_adjacent_dev_unlink(struct net_device *dev,
4740 struct net_device *upper_dev)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004741{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004742 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4743 &dev->all_adj_list.upper,
4744 &upper_dev->all_adj_list.lower);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004745}
4746
stephen hemminger1d143d92013-12-29 14:01:29 -08004747static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
4748 struct net_device *upper_dev,
4749 void *private, bool master)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004750{
4751 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
4752
4753 if (ret)
4754 return ret;
4755
4756 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
4757 &dev->adj_list.upper,
4758 &upper_dev->adj_list.lower,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004759 private, master);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004760 if (ret) {
4761 __netdev_adjacent_dev_unlink(dev, upper_dev);
4762 return ret;
4763 }
4764
4765 return 0;
4766}
4767
stephen hemminger1d143d92013-12-29 14:01:29 -08004768static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
4769 struct net_device *upper_dev)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004770{
4771 __netdev_adjacent_dev_unlink(dev, upper_dev);
4772 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4773 &dev->adj_list.upper,
4774 &upper_dev->adj_list.lower);
4775}
Veaceslav Falico5d261912013-08-28 23:25:05 +02004776
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004777static int __netdev_upper_dev_link(struct net_device *dev,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004778 struct net_device *upper_dev, bool master,
4779 void *private)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004780{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004781 struct netdev_adjacent *i, *j, *to_i, *to_j;
4782 int ret = 0;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004783
4784 ASSERT_RTNL();
4785
4786 if (dev == upper_dev)
4787 return -EBUSY;
4788
4789 /* To prevent loops, check that dev is not an upper device of upper_dev. */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004790 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004791 return -EBUSY;
4792
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004793 if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004794 return -EEXIST;
4795
4796 if (master && netdev_master_upper_dev_get(dev))
4797 return -EBUSY;
4798
Veaceslav Falico402dae92013-09-25 09:20:09 +02004799 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
4800 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004801 if (ret)
4802 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004803
Veaceslav Falico5d261912013-08-28 23:25:05 +02004804 /* Now that we linked these devs, make all the upper_dev's
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004805 * all_adj_list.upper visible to every dev's all_adj_list.lower and
Veaceslav Falico5d261912013-08-28 23:25:05 +02004806 * vice versa, and don't forget the devices themselves. All of these
4807 * links are non-neighbours.
4808 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004809 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4810 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
4811 pr_debug("Interlinking %s with %s, non-neighbour\n",
4812 i->dev->name, j->dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004813 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
4814 if (ret)
4815 goto rollback_mesh;
4816 }
4817 }
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004818
Veaceslav Falico5d261912013-08-28 23:25:05 +02004819 /* add dev to every upper_dev's upper device */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004820 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
4821 pr_debug("linking %s's upper device %s with %s\n",
4822 upper_dev->name, i->dev->name, dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004823 ret = __netdev_adjacent_dev_link(dev, i->dev);
4824 if (ret)
4825 goto rollback_upper_mesh;
4826 }
4827
4828 /* add upper_dev to every dev's lower device */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004829 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4830 pr_debug("linking %s's lower device %s with %s\n", dev->name,
4831 i->dev->name, upper_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004832 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
4833 if (ret)
4834 goto rollback_lower_mesh;
4835 }
4836
Jiri Pirko42e52bf2013-05-25 04:12:10 +00004837 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004838 return 0;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004839
4840rollback_lower_mesh:
4841 to_i = i;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004842 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004843 if (i == to_i)
4844 break;
4845 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
4846 }
4847
4848 i = NULL;
4849
4850rollback_upper_mesh:
4851 to_i = i;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004852 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004853 if (i == to_i)
4854 break;
4855 __netdev_adjacent_dev_unlink(dev, i->dev);
4856 }
4857
4858 i = j = NULL;
4859
4860rollback_mesh:
4861 to_i = i;
4862 to_j = j;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004863 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4864 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004865 if (i == to_i && j == to_j)
4866 break;
4867 __netdev_adjacent_dev_unlink(i->dev, j->dev);
4868 }
4869 if (i == to_i)
4870 break;
4871 }
4872
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004873 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004874
4875 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004876}
4877
4878/**
4879 * netdev_upper_dev_link - Add a link to the upper device
4880 * @dev: device
4881 * @upper_dev: new upper device
4882 *
4883 * Adds a link to a device which is upper to this one. The caller must hold
4884 * the RTNL lock. On a failure a negative errno code is returned.
4885 * On success the reference counts are adjusted and the function
4886 * returns zero.
4887 */
4888int netdev_upper_dev_link(struct net_device *dev,
4889 struct net_device *upper_dev)
4890{
Veaceslav Falico402dae92013-09-25 09:20:09 +02004891 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004892}
4893EXPORT_SYMBOL(netdev_upper_dev_link);
4894
4895/**
4896 * netdev_master_upper_dev_link - Add a master link to the upper device
4897 * @dev: device
4898 * @upper_dev: new upper device
4899 *
4900 * Adds a link to a device which is upper to this one. In this case, only
4901 * one master upper device can be linked, although other non-master devices
4902 * might be linked as well. The caller must hold the RTNL lock.
4903 * On a failure a negative errno code is returned. On success the reference
4904 * counts are adjusted and the function returns zero.
4905 */
4906int netdev_master_upper_dev_link(struct net_device *dev,
4907 struct net_device *upper_dev)
4908{
Veaceslav Falico402dae92013-09-25 09:20:09 +02004909 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004910}
4911EXPORT_SYMBOL(netdev_master_upper_dev_link);
4912
Veaceslav Falico402dae92013-09-25 09:20:09 +02004913int netdev_master_upper_dev_link_private(struct net_device *dev,
4914 struct net_device *upper_dev,
4915 void *private)
4916{
4917 return __netdev_upper_dev_link(dev, upper_dev, true, private);
4918}
4919EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
4920
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004921/**
4922 * netdev_upper_dev_unlink - Removes a link to upper device
4923 * @dev: device
4924 * @upper_dev: upper device to unlink
4925 *
4926 * Removes a link to a device which is upper to this one. The caller must hold
4927 * the RTNL lock.
4928 */
4929void netdev_upper_dev_unlink(struct net_device *dev,
4930 struct net_device *upper_dev)
4931{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004932 struct netdev_adjacent *i, *j;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004933 ASSERT_RTNL();
4934
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004935 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004936
4937 /* Here is the tricky part. We must remove all dev's lower
4938 * devices from all upper_dev's upper devices and vice
4939 * versa, to maintain the graph relationship.
4940 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004941 list_for_each_entry(i, &dev->all_adj_list.lower, list)
4942 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004943 __netdev_adjacent_dev_unlink(i->dev, j->dev);
4944
4945 /* also remove the devices themselves from the lower/upper device
4946 * lists
4947 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004948 list_for_each_entry(i, &dev->all_adj_list.lower, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004949 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
4950
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004951 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004952 __netdev_adjacent_dev_unlink(dev, i->dev);
4953
Jiri Pirko42e52bf2013-05-25 04:12:10 +00004954 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004955}
4956EXPORT_SYMBOL(netdev_upper_dev_unlink);
4957
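/*
 * Editorial sketch (hedged): bonding-style enslavement built on the
 * link/unlink primitives above.  Error paths beyond the link itself are
 * trimmed; both helpers assume the caller holds RTNL.
 */
static int my_enslave(struct net_device *master, struct net_device *slave)
{
	int err;

	err = netdev_master_upper_dev_link(slave, master);
	if (err)
		return err;	/* -EBUSY on loops or a second master, -EEXIST, ... */

	/* device-specific slave configuration would follow here */
	return 0;
}

static void my_release_slave(struct net_device *master, struct net_device *slave)
{
	netdev_upper_dev_unlink(slave, master);
}
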
Veaceslav Falico402dae92013-09-25 09:20:09 +02004958void *netdev_lower_dev_get_private(struct net_device *dev,
4959 struct net_device *lower_dev)
4960{
4961 struct netdev_adjacent *lower;
4962
4963 if (!lower_dev)
4964 return NULL;
4965 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
4966 if (!lower)
4967 return NULL;
4968
4969 return lower->private;
4970}
4971EXPORT_SYMBOL(netdev_lower_dev_get_private);
4972
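/*
 * Editorial sketch (hedged): attaching per-link private data at link
 * time and reading it back later.  struct my_slave_info is hypothetical.
 */
struct my_slave_info {
	int queue_id;	/* hypothetical per-slave state */
};

static int my_enslave_with_info(struct net_device *master,
				struct net_device *slave,
				struct my_slave_info *info)
{
	return netdev_master_upper_dev_link_private(slave, master, info);
}

static struct my_slave_info *my_get_slave_info(struct net_device *master,
					       struct net_device *slave)
{
	return netdev_lower_dev_get_private(master, slave);
}
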
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004973static void dev_change_rx_flags(struct net_device *dev, int flags)
4974{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004975 const struct net_device_ops *ops = dev->netdev_ops;
4976
Vlad Yasevichd2615bf2013-11-19 20:47:15 -05004977 if (ops->ndo_change_rx_flags)
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004978 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004979}
4980
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02004981static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
Patrick McHardy4417da62007-06-27 01:28:10 -07004982{
Eric Dumazetb536db92011-11-30 21:42:26 +00004983 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06004984 kuid_t uid;
4985 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07004986
Patrick McHardy24023452007-07-14 18:51:31 -07004987 ASSERT_RTNL();
4988
Wang Chendad9b332008-06-18 01:48:28 -07004989 dev->flags |= IFF_PROMISC;
4990 dev->promiscuity += inc;
4991 if (dev->promiscuity == 0) {
4992 /*
4993 * Avoid overflow.
4994 * If inc causes overflow, untouch promisc and return error.
4995 */
4996 if (inc < 0)
4997 dev->flags &= ~IFF_PROMISC;
4998 else {
4999 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005000 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5001 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005002 return -EOVERFLOW;
5003 }
5004 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005005 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005006 pr_info("device %s %s promiscuous mode\n",
5007 dev->name,
5008 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11005009 if (audit_enabled) {
5010 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005011 audit_log(current->audit_context, GFP_ATOMIC,
5012 AUDIT_ANOM_PROMISCUOUS,
5013 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5014 dev->name, (dev->flags & IFF_PROMISC),
5015 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07005016 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005017 from_kuid(&init_user_ns, uid),
5018 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005019 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11005020 }
Patrick McHardy24023452007-07-14 18:51:31 -07005021
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005022 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07005023 }
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005024 if (notify)
5025 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
Wang Chendad9b332008-06-18 01:48:28 -07005026 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005027}
5028
Linus Torvalds1da177e2005-04-16 15:20:36 -07005029/**
5030 * dev_set_promiscuity - update promiscuity count on a device
5031 * @dev: device
5032 * @inc: modifier
5033 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07005034 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005035 * remains above zero the interface remains promiscuous. Once it hits zero
5036 * the device reverts to normal filtering operation. A negative inc
5037 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07005038 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039 */
Wang Chendad9b332008-06-18 01:48:28 -07005040int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005041{
Eric Dumazetb536db92011-11-30 21:42:26 +00005042 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07005043 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005044
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005045 err = __dev_set_promiscuity(dev, inc, true);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07005046 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07005047 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07005048 if (dev->flags != old_flags)
5049 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07005050 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005051}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005052EXPORT_SYMBOL(dev_set_promiscuity);
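
/*
 * Editor's sketch (not part of the original source): minimal caller-side
 * use of dev_set_promiscuity(). "dev" is assumed to be a valid device
 * supplied by the caller; RTNL must be held, and every +1 must later be
 * balanced by a -1 so the reference count can fall back to zero:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	// take a promiscuity reference
 *	if (!err) {
 *		// ... capture traffic ...
 *		dev_set_promiscuity(dev, -1);	// drop the reference again
 *	}
 *	rtnl_unlock();
 */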
Linus Torvalds1da177e2005-04-16 15:20:36 -07005053
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005054static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055{
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005056 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057
Patrick McHardy24023452007-07-14 18:51:31 -07005058 ASSERT_RTNL();
5059
Linus Torvalds1da177e2005-04-16 15:20:36 -07005060 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07005061 dev->allmulti += inc;
5062 if (dev->allmulti == 0) {
5063 /*
5064 * Avoid overflow.
5065	 * If inc causes overflow, leave allmulti untouched and return an error.
5066 */
5067 if (inc < 0)
5068 dev->flags &= ~IFF_ALLMULTI;
5069 else {
5070 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005071 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5072 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005073 return -EOVERFLOW;
5074 }
5075 }
Patrick McHardy24023452007-07-14 18:51:31 -07005076 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005077 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07005078 dev_set_rx_mode(dev);
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005079 if (notify)
5080 __dev_notify_flags(dev, old_flags,
5081 dev->gflags ^ old_gflags);
Patrick McHardy24023452007-07-14 18:51:31 -07005082 }
Wang Chendad9b332008-06-18 01:48:28 -07005083 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005084}
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005085
5086/**
5087 * dev_set_allmulti - update allmulti count on a device
5088 * @dev: device
5089 * @inc: modifier
5090 *
5091 * Add or remove reception of all multicast frames to a device. While the
5092 * count in the device remains above zero the interface remains listening
5093 * to all interfaces. Once it hits zero the device reverts back to normal
5094 * filtering operation. A negative @inc value is used to drop the counter
5095 * when releasing a resource needing all multicasts.
5096 * Return 0 if successful or a negative errno code on error.
5097 */
5098
5099int dev_set_allmulti(struct net_device *dev, int inc)
5100{
5101 return __dev_set_allmulti(dev, inc, true);
5102}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005103EXPORT_SYMBOL(dev_set_allmulti);
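
/*
 * Editor's sketch (not part of the original source): a protocol that
 * needs every multicast frame, e.g. while tracking many groups, would
 * bump and later drop the allmulti count under RTNL:
 *
 *	rtnl_lock();
 *	err = dev_set_allmulti(dev, 1);		// start receiving all multicast
 *	// ...
 *	dev_set_allmulti(dev, -1);		// balance the earlier +1
 *	rtnl_unlock();
 */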
Patrick McHardy4417da62007-06-27 01:28:10 -07005104
5105/*
5106 * Upload unicast and multicast address lists to device and
5107 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08005108 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07005109 * are present.
5110 */
5111void __dev_set_rx_mode(struct net_device *dev)
5112{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005113 const struct net_device_ops *ops = dev->netdev_ops;
5114
Patrick McHardy4417da62007-06-27 01:28:10 -07005115 /* dev_open will call this function so the list will stay sane. */
5116 if (!(dev->flags&IFF_UP))
5117 return;
5118
5119 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09005120 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07005121
Jiri Pirko01789342011-08-16 06:29:00 +00005122 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005123		/* Unicast address changes may only happen under the rtnl,
5124 * therefore calling __dev_set_promiscuity here is safe.
5125 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005126 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005127 __dev_set_promiscuity(dev, 1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07005128 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005129 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005130 __dev_set_promiscuity(dev, -1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07005131 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07005132 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005133 }
Jiri Pirko01789342011-08-16 06:29:00 +00005134
5135 if (ops->ndo_set_rx_mode)
5136 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005137}
5138
5139void dev_set_rx_mode(struct net_device *dev)
5140{
David S. Millerb9e40852008-07-15 00:15:08 -07005141 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005142 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07005143 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005144}
5145
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005146/**
5147 * dev_get_flags - get flags reported to userspace
5148 * @dev: device
5149 *
5150 * Get the combination of flag bits exported through APIs to userspace.
5151 */
Eric Dumazet95c96172012-04-15 05:58:06 +00005152unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005153{
Eric Dumazet95c96172012-04-15 05:58:06 +00005154 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005155
5156 flags = (dev->flags & ~(IFF_PROMISC |
5157 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08005158 IFF_RUNNING |
5159 IFF_LOWER_UP |
5160 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161 (dev->gflags & (IFF_PROMISC |
5162 IFF_ALLMULTI));
5163
Stefan Rompfb00055a2006-03-20 17:09:11 -08005164 if (netif_running(dev)) {
5165 if (netif_oper_up(dev))
5166 flags |= IFF_RUNNING;
5167 if (netif_carrier_ok(dev))
5168 flags |= IFF_LOWER_UP;
5169 if (netif_dormant(dev))
5170 flags |= IFF_DORMANT;
5171 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172
5173 return flags;
5174}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005175EXPORT_SYMBOL(dev_get_flags);
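
/*
 * Editor's sketch (not part of the original source): dev_get_flags()
 * already folds gflags and operstate into the user-visible word, so a
 * reader can test IFF_RUNNING without poking at internal state:
 *
 *	unsigned int flags = dev_get_flags(dev);
 *
 *	if (flags & IFF_RUNNING)
 *		pr_info("%s is operationally up\n", dev->name);
 */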
Linus Torvalds1da177e2005-04-16 15:20:36 -07005176
Patrick McHardybd380812010-02-26 06:34:53 +00005177int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178{
Eric Dumazetb536db92011-11-30 21:42:26 +00005179 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005180 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005181
Patrick McHardy24023452007-07-14 18:51:31 -07005182 ASSERT_RTNL();
5183
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184 /*
5185 * Set the flags on our device.
5186 */
5187
5188 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5189 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5190 IFF_AUTOMEDIA)) |
5191 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5192 IFF_ALLMULTI));
5193
5194 /*
5195 * Load in the correct multicast list now the flags have changed.
5196 */
5197
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005198 if ((old_flags ^ flags) & IFF_MULTICAST)
5199 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07005200
Patrick McHardy4417da62007-06-27 01:28:10 -07005201 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202
5203 /*
5204	 *	Have we downed the interface? We handle IFF_UP ourselves
5205 * according to user attempts to set it, rather than blindly
5206 * setting it.
5207 */
5208
5209 ret = 0;
5210 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00005211 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212
5213 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07005214 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005215 }
5216
Linus Torvalds1da177e2005-04-16 15:20:36 -07005217 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005218 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005219 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005220
Linus Torvalds1da177e2005-04-16 15:20:36 -07005221 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005222
5223 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5224 if (dev->flags != old_flags)
5225 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226 }
5227
5228 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5229	   is important. Some (broken) drivers set IFF_PROMISC when
5230	   IFF_ALLMULTI is requested, without asking us and without reporting it.
5231 */
5232 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005233 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5234
Linus Torvalds1da177e2005-04-16 15:20:36 -07005235 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005236 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005237 }
5238
Patrick McHardybd380812010-02-26 06:34:53 +00005239 return ret;
5240}
5241
Nicolas Dichtela528c212013-09-25 12:02:44 +02005242void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5243 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00005244{
5245 unsigned int changes = dev->flags ^ old_flags;
5246
Nicolas Dichtela528c212013-09-25 12:02:44 +02005247 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005248 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02005249
Patrick McHardybd380812010-02-26 06:34:53 +00005250 if (changes & IFF_UP) {
5251 if (dev->flags & IFF_UP)
5252 call_netdevice_notifiers(NETDEV_UP, dev);
5253 else
5254 call_netdevice_notifiers(NETDEV_DOWN, dev);
5255 }
5256
5257 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00005258 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5259 struct netdev_notifier_change_info change_info;
5260
5261 change_info.flags_changed = changes;
5262 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5263 &change_info.info);
5264 }
Patrick McHardybd380812010-02-26 06:34:53 +00005265}
5266
5267/**
5268 * dev_change_flags - change device settings
5269 * @dev: device
5270 * @flags: device state flags
5271 *
5272 *	Change settings on a device based on the given state flags. The flags are
5273 * in the userspace exported format.
5274 */
Eric Dumazetb536db92011-11-30 21:42:26 +00005275int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00005276{
Eric Dumazetb536db92011-11-30 21:42:26 +00005277 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005278 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00005279
5280 ret = __dev_change_flags(dev, flags);
5281 if (ret < 0)
5282 return ret;
5283
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005284 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02005285 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286 return ret;
5287}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005288EXPORT_SYMBOL(dev_change_flags);
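
/*
 * Editor's sketch (not part of the original source): bringing an
 * interface up the way an SIOCSIFFLAGS handler would, under RTNL:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP);
 *	rtnl_unlock();
 *
 * On success the NETDEV_UP notifier and the RTM_NEWLINK message are
 * emitted by __dev_notify_flags() above.
 */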
Linus Torvalds1da177e2005-04-16 15:20:36 -07005289
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005290/**
5291 * dev_set_mtu - Change maximum transfer unit
5292 * @dev: device
5293 * @new_mtu: new transfer unit
5294 *
5295 * Change the maximum transfer size of the network device.
5296 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005297int dev_set_mtu(struct net_device *dev, int new_mtu)
5298{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005299 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005300 int err;
5301
5302 if (new_mtu == dev->mtu)
5303 return 0;
5304
5305 /* MTU must be positive. */
5306 if (new_mtu < 0)
5307 return -EINVAL;
5308
5309 if (!netif_device_present(dev))
5310 return -ENODEV;
5311
5312 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005313 if (ops->ndo_change_mtu)
5314 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005315 else
5316 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005317
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00005318 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005319 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005320 return err;
5321}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005322EXPORT_SYMBOL(dev_set_mtu);
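
/*
 * Editor's sketch (not part of the original source): a caller shrinking
 * the MTU to leave room for tunnel headers; a value the driver rejects
 * in ndo_change_mtu() comes back as a negative errno:
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 1400);
 *	rtnl_unlock();
 */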
Linus Torvalds1da177e2005-04-16 15:20:36 -07005323
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005324/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005325 * dev_set_group - Change group this device belongs to
5326 * @dev: device
5327 * @new_group: group this device should belong to
5328 */
5329void dev_set_group(struct net_device *dev, int new_group)
5330{
5331 dev->group = new_group;
5332}
5333EXPORT_SYMBOL(dev_set_group);
5334
5335/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005336 * dev_set_mac_address - Change Media Access Control Address
5337 * @dev: device
5338 * @sa: new address
5339 *
5340 * Change the hardware (MAC) address of the device
5341 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5343{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005344 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005345 int err;
5346
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005347 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005348 return -EOPNOTSUPP;
5349 if (sa->sa_family != dev->type)
5350 return -EINVAL;
5351 if (!netif_device_present(dev))
5352 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005353 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00005354 if (err)
5355 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00005356 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00005357 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005358 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00005359 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005360}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005361EXPORT_SYMBOL(dev_set_mac_address);
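
/*
 * Editor's sketch (not part of the original source): setting a locally
 * administered address on an Ethernet device. sa_family must match
 * dev->type (ARPHRD_ETHER here) and sa_data carries the address bytes:
 *
 *	struct sockaddr sa;
 *	static const u8 mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, mac, ETH_ALEN);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */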
Linus Torvalds1da177e2005-04-16 15:20:36 -07005362
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005363/**
5364 * dev_change_carrier - Change device carrier
5365 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00005366 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005367 *
5368 * Change device carrier
5369 */
5370int dev_change_carrier(struct net_device *dev, bool new_carrier)
5371{
5372 const struct net_device_ops *ops = dev->netdev_ops;
5373
5374 if (!ops->ndo_change_carrier)
5375 return -EOPNOTSUPP;
5376 if (!netif_device_present(dev))
5377 return -ENODEV;
5378 return ops->ndo_change_carrier(dev, new_carrier);
5379}
5380EXPORT_SYMBOL(dev_change_carrier);
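
/*
 * Editor's sketch (not part of the original source): the rtnetlink
 * IFLA_CARRIER handler is the expected caller, with RTNL already held:
 *
 *	err = dev_change_carrier(dev, false);	// force carrier off
 */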
5381
Linus Torvalds1da177e2005-04-16 15:20:36 -07005382/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02005383 * dev_get_phys_port_id - Get device physical port ID
5384 * @dev: device
5385 * @ppid: port ID
5386 *
5387 * Get device physical port ID
5388 */
5389int dev_get_phys_port_id(struct net_device *dev,
5390 struct netdev_phys_port_id *ppid)
5391{
5392 const struct net_device_ops *ops = dev->netdev_ops;
5393
5394 if (!ops->ndo_get_phys_port_id)
5395 return -EOPNOTSUPP;
5396 return ops->ndo_get_phys_port_id(dev, ppid);
5397}
5398EXPORT_SYMBOL(dev_get_phys_port_id);
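
/*
 * Editor's sketch (not part of the original source): querying and
 * printing the port ID; drivers without ndo_get_phys_port_id return
 * -EOPNOTSUPP, which callers are expected to tolerate:
 *
 *	struct netdev_phys_port_id ppid;
 *
 *	if (!dev_get_phys_port_id(dev, &ppid))
 *		pr_info("%s: port id %*phN\n", dev->name,
 *			ppid.id_len, ppid.id);
 */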
5399
5400/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005401 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005402 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005403 *
5404 * Returns a suitable unique value for a new device interface
5405 * number. The caller must hold the rtnl semaphore or the
5406 * dev_base_lock to be sure it remains unique.
5407 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005408static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005409{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005410 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005411 for (;;) {
5412 if (++ifindex <= 0)
5413 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005414 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005415 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005416 }
5417}
5418
Linus Torvalds1da177e2005-04-16 15:20:36 -07005419/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005420static LIST_HEAD(net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07005421static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005423static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005425 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07005426 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005427}
5428
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005429static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005430{
Krishna Kumare93737b2009-12-08 22:26:02 +00005431 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07005432 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005433
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005434 BUG_ON(dev_boot_phase);
5435 ASSERT_RTNL();
5436
Krishna Kumare93737b2009-12-08 22:26:02 +00005437 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005438		/* Some devices call this without ever having
Krishna Kumare93737b2009-12-08 22:26:02 +00005439		 * registered, to unwind a failed initialization.
5440		 * Remove those devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005441 */
5442 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005443 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5444 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005445
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005446 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00005447 list_del(&dev->unreg_list);
5448 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005449 }
Eric Dumazet449f4542011-05-19 12:24:16 +00005450 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005451 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00005452 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005453
Octavian Purdila44345722010-12-13 12:44:07 +00005454 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07005455 list_for_each_entry(dev, head, unreg_list)
5456 list_add_tail(&dev->close_list, &close_head);
5457 dev_close_many(&close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005458
Octavian Purdila44345722010-12-13 12:44:07 +00005459 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005460 /* And unlink it from device chain. */
5461 unlist_netdevice(dev);
5462
5463 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005464 }
5465
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005466 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005467
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005468 list_for_each_entry(dev, head, unreg_list) {
5469 /* Shutdown queueing discipline. */
5470 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005471
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005472
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005473 /* Notify protocols, that we are about to destroy
5474 this device. They should clean all the things.
5475 */
5476 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5477
Patrick McHardya2835762010-02-26 06:34:51 +00005478 if (!dev->rtnl_link_ops ||
5479 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005480 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Patrick McHardya2835762010-02-26 06:34:51 +00005481
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005482 /*
5483 * Flush the unicast and multicast chains
5484 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005485 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005486 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005487
5488 if (dev->netdev_ops->ndo_uninit)
5489 dev->netdev_ops->ndo_uninit(dev);
5490
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005491 /* Notifier chain MUST detach us all upper devices. */
5492 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005493
5494 /* Remove entries from kobject tree */
5495 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00005496#ifdef CONFIG_XPS
5497 /* Remove XPS queueing entries */
5498 netif_reset_xps_queues_gt(dev, 0);
5499#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005500 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005501
Eric W. Biederman850a5452011-10-13 22:25:23 +00005502 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005503
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005504 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005505 dev_put(dev);
5506}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005507
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005508static void rollback_registered(struct net_device *dev)
5509{
5510 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005511
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005512 list_add(&dev->unreg_list, &single);
5513 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00005514 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005515}
5516
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005517static netdev_features_t netdev_fix_features(struct net_device *dev,
5518 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07005519{
Michał Mirosław57422dc2011-01-22 12:14:12 +00005520 /* Fix illegal checksum combinations */
5521 if ((features & NETIF_F_HW_CSUM) &&
5522 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005523 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00005524 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5525 }
5526
Herbert Xub63365a2008-10-23 01:11:29 -07005527 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005528 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005529 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005530 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07005531 }
5532
Pravin B Shelarec5f0612013-03-07 09:28:01 +00005533 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5534 !(features & NETIF_F_IP_CSUM)) {
5535 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5536 features &= ~NETIF_F_TSO;
5537 features &= ~NETIF_F_TSO_ECN;
5538 }
5539
5540 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5541 !(features & NETIF_F_IPV6_CSUM)) {
5542 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5543 features &= ~NETIF_F_TSO6;
5544 }
5545
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005546 /* TSO ECN requires that TSO is present as well. */
5547 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5548 features &= ~NETIF_F_TSO_ECN;
5549
Michał Mirosław212b5732011-02-15 16:59:16 +00005550 /* Software GSO depends on SG. */
5551 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005552 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005553 features &= ~NETIF_F_GSO;
5554 }
5555
Michał Mirosławacd11302011-01-24 15:45:15 -08005556 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005557 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005558 /* maybe split UFO into V4 and V6? */
5559 if (!((features & NETIF_F_GEN_CSUM) ||
5560 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5561 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005562 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005563 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005564 features &= ~NETIF_F_UFO;
5565 }
5566
5567 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005568 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005569 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005570 features &= ~NETIF_F_UFO;
5571 }
5572 }
5573
5574 return features;
5575}
Herbert Xub63365a2008-10-23 01:11:29 -07005576
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005577int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00005578{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005579 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00005580 int err = 0;
5581
Michał Mirosław87267482011-04-12 09:56:38 +00005582 ASSERT_RTNL();
5583
Michał Mirosław5455c692011-02-15 16:59:17 +00005584 features = netdev_get_wanted_features(dev);
5585
5586 if (dev->netdev_ops->ndo_fix_features)
5587 features = dev->netdev_ops->ndo_fix_features(dev, features);
5588
5589 /* driver might be less strict about feature dependencies */
5590 features = netdev_fix_features(dev, features);
5591
5592 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005593 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00005594
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005595 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5596 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00005597
5598 if (dev->netdev_ops->ndo_set_features)
5599 err = dev->netdev_ops->ndo_set_features(dev, features);
5600
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005601 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00005602 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005603 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5604 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005605 return -1;
5606 }
5607
5608 if (!err)
5609 dev->features = features;
5610
5611 return 1;
5612}
5613
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005614/**
5615 * netdev_update_features - recalculate device features
5616 * @dev: the device to check
5617 *
5618 * Recalculate dev->features set and send notifications if it
5619 * has changed. Should be called after driver or hardware dependent
5620 * conditions might have changed that influence the features.
5621 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005622void netdev_update_features(struct net_device *dev)
5623{
5624 if (__netdev_update_features(dev))
5625 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00005626}
5627EXPORT_SYMBOL(netdev_update_features);
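
/*
 * Editor's sketch (not part of the original source): a driver whose
 * hardware lost checksum offload after a reconfiguration would clear
 * the bit it owns and let the core recompute and notify, under RTNL:
 *
 *	rtnl_lock();
 *	dev->hw_features &= ~NETIF_F_IP_CSUM;
 *	netdev_update_features(dev);
 *	rtnl_unlock();
 */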
5628
Linus Torvalds1da177e2005-04-16 15:20:36 -07005629/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005630 * netdev_change_features - recalculate device features
5631 * @dev: the device to check
5632 *
5633 * Recalculate dev->features set and send notifications even
5634 * if they have not changed. Should be called instead of
5635 * netdev_update_features() if also dev->vlan_features might
5636 * have changed to allow the changes to be propagated to stacked
5637 * VLAN devices.
5638 */
5639void netdev_change_features(struct net_device *dev)
5640{
5641 __netdev_update_features(dev);
5642 netdev_features_change(dev);
5643}
5644EXPORT_SYMBOL(netdev_change_features);
5645
5646/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005647 * netif_stacked_transfer_operstate - transfer operstate
5648 * @rootdev: the root or lower level device to transfer state from
5649 * @dev: the device to transfer operstate to
5650 *
5651 * Transfer operational state from root to device. This is normally
5652 * called when a stacking relationship exists between the root
5653 * device and the device (a leaf device).
5654 */
5655void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5656 struct net_device *dev)
5657{
5658 if (rootdev->operstate == IF_OPER_DORMANT)
5659 netif_dormant_on(dev);
5660 else
5661 netif_dormant_off(dev);
5662
5663 if (netif_carrier_ok(rootdev)) {
5664 if (!netif_carrier_ok(dev))
5665 netif_carrier_on(dev);
5666 } else {
5667 if (netif_carrier_ok(dev))
5668 netif_carrier_off(dev);
5669 }
5670}
5671EXPORT_SYMBOL(netif_stacked_transfer_operstate);
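
/*
 * Editor's sketch (not part of the original source): a stacked driver
 * such as 802.1q mirrors the lower device's state after linking:
 *
 *	netif_stacked_transfer_operstate(real_dev, vlan_dev);
 */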
5672
Tom Herbertbf264142010-11-26 08:36:09 +00005673#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005674static int netif_alloc_rx_queues(struct net_device *dev)
5675{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005676 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00005677 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005678
Tom Herbertbd25fa72010-10-18 18:00:16 +00005679 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005680
Tom Herbertbd25fa72010-10-18 18:00:16 +00005681 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005682 if (!rx)
Tom Herbertbd25fa72010-10-18 18:00:16 +00005683 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005684
Tom Herbertbd25fa72010-10-18 18:00:16 +00005685 dev->_rx = rx;
5686
Tom Herbertbd25fa72010-10-18 18:00:16 +00005687 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00005688 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005689 return 0;
5690}
Tom Herbertbf264142010-11-26 08:36:09 +00005691#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005692
Changli Gaoaa942102010-12-04 02:31:41 +00005693static void netdev_init_one_queue(struct net_device *dev,
5694 struct netdev_queue *queue, void *_unused)
5695{
5696 /* Initialize queue lock */
5697 spin_lock_init(&queue->_xmit_lock);
5698 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5699 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00005700 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00005701 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00005702#ifdef CONFIG_BQL
5703 dql_init(&queue->dql, HZ);
5704#endif
Changli Gaoaa942102010-12-04 02:31:41 +00005705}
5706
Eric Dumazet60877a32013-06-20 01:15:51 -07005707static void netif_free_tx_queues(struct net_device *dev)
5708{
5709 if (is_vmalloc_addr(dev->_tx))
5710 vfree(dev->_tx);
5711 else
5712 kfree(dev->_tx);
5713}
5714
Tom Herberte6484932010-10-18 18:04:39 +00005715static int netif_alloc_netdev_queues(struct net_device *dev)
5716{
5717 unsigned int count = dev->num_tx_queues;
5718 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07005719 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00005720
Eric Dumazet60877a32013-06-20 01:15:51 -07005721 BUG_ON(count < 1 || count > 0xffff);
Tom Herberte6484932010-10-18 18:04:39 +00005722
Eric Dumazet60877a32013-06-20 01:15:51 -07005723 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
5724 if (!tx) {
5725 tx = vzalloc(sz);
5726 if (!tx)
5727 return -ENOMEM;
5728 }
Tom Herberte6484932010-10-18 18:04:39 +00005729 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00005730
Tom Herberte6484932010-10-18 18:04:39 +00005731 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5732 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00005733
5734 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00005735}
5736
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005737/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005738 * register_netdevice - register a network device
5739 * @dev: device to register
5740 *
5741 * Take a completed network device structure and add it to the kernel
5742 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5743 * chain. 0 is returned on success. A negative errno code is returned
5744 * on a failure to set up the device, or if the name is a duplicate.
5745 *
5746 * Callers must hold the rtnl semaphore. You may want
5747 * register_netdev() instead of this.
5748 *
5749 * BUGS:
5750 * The locking appears insufficient to guarantee two parallel registers
5751 * will not get the same name.
5752 */
5753
5754int register_netdevice(struct net_device *dev)
5755{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005756 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005757 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005758
5759 BUG_ON(dev_boot_phase);
5760 ASSERT_RTNL();
5761
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005762 might_sleep();
5763
Linus Torvalds1da177e2005-04-16 15:20:36 -07005764 /* When net_device's are persistent, this will be fatal. */
5765 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005766 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005767
David S. Millerf1f28aa2008-07-15 00:08:33 -07005768 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07005769 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005770
Linus Torvalds1da177e2005-04-16 15:20:36 -07005771 dev->iflink = -1;
5772
Gao feng828de4f2012-09-13 20:58:27 +00005773 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00005774 if (ret < 0)
5775 goto out;
5776
Linus Torvalds1da177e2005-04-16 15:20:36 -07005777 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005778 if (dev->netdev_ops->ndo_init) {
5779 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005780 if (ret) {
5781 if (ret > 0)
5782 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08005783 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005784 }
5785 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005786
Patrick McHardyf6469682013-04-19 02:04:27 +00005787 if (((dev->hw_features | dev->features) &
5788 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00005789 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5790 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5791 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5792 ret = -EINVAL;
5793 goto err_uninit;
5794 }
5795
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00005796 ret = -EBUSY;
5797 if (!dev->ifindex)
5798 dev->ifindex = dev_new_index(net);
5799 else if (__dev_get_by_index(net, dev->ifindex))
5800 goto err_uninit;
5801
Linus Torvalds1da177e2005-04-16 15:20:36 -07005802 if (dev->iflink == -1)
5803 dev->iflink = dev->ifindex;
5804
Michał Mirosław5455c692011-02-15 16:59:17 +00005805 /* Transfer changeable features to wanted_features and enable
5806 * software offloads (GSO and GRO).
5807 */
5808 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00005809 dev->features |= NETIF_F_SOFT_FEATURES;
5810 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005811
Michał Mirosław34324dc2011-11-15 15:29:55 +00005812 if (!(dev->flags & IFF_LOOPBACK)) {
5813 dev->hw_features |= NETIF_F_NOCACHE_COPY;
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005814 }
5815
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005816 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00005817 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005818 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00005819
Pravin B Shelaree579672013-03-07 09:28:08 +00005820 /* Make NETIF_F_SG inheritable to tunnel devices.
5821 */
5822 dev->hw_enc_features |= NETIF_F_SG;
5823
Simon Horman0d89d202013-05-23 21:02:52 +00005824 /* Make NETIF_F_SG inheritable to MPLS.
5825 */
5826 dev->mpls_features |= NETIF_F_SG;
5827
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005828 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5829 ret = notifier_to_errno(ret);
5830 if (ret)
5831 goto err_uninit;
5832
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005833 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005834 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005835 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005836 dev->reg_state = NETREG_REGISTERED;
5837
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005838 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00005839
Linus Torvalds1da177e2005-04-16 15:20:36 -07005840 /*
5841 * Default initial state at registry is that the
5842 * device is present.
5843 */
5844
5845 set_bit(__LINK_STATE_PRESENT, &dev->state);
5846
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01005847 linkwatch_init_dev(dev);
5848
Linus Torvalds1da177e2005-04-16 15:20:36 -07005849 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005850 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005851 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005852 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005853
Jiri Pirko948b3372013-01-08 01:38:25 +00005854 /* If the device has permanent device address, driver should
5855 * set dev_addr and also addr_assign_type should be set to
5856 * NET_ADDR_PERM (default value).
5857 */
5858 if (dev->addr_assign_type == NET_ADDR_PERM)
5859 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5860
Linus Torvalds1da177e2005-04-16 15:20:36 -07005861 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005862 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005863 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005864 if (ret) {
5865 rollback_registered(dev);
5866 dev->reg_state = NETREG_UNREGISTERED;
5867 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005868 /*
5869 * Prevent userspace races by waiting until the network
5870 * device is fully setup before sending notifications.
5871 */
Patrick McHardya2835762010-02-26 06:34:51 +00005872 if (!dev->rtnl_link_ops ||
5873 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005874 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005875
5876out:
5877 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005878
5879err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005880 if (dev->netdev_ops->ndo_uninit)
5881 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005882 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005883}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005884EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005885
5886/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005887 * init_dummy_netdev - init a dummy network device for NAPI
5888 * @dev: device to init
5889 *
5890 * This takes a network device structure and initializes the minimum
5891 * number of fields so it can be used to schedule NAPI polls without
5892 * registering a full-blown interface. This is to be used by drivers
5893 * that need to tie several hardware interfaces to a single NAPI
5894 * poll scheduler due to HW limitations.
5895 */
5896int init_dummy_netdev(struct net_device *dev)
5897{
5898 /* Clear everything. Note we don't initialize spinlocks
5899	 * as they aren't supposed to be taken by any of the
5900 * NAPI code and this dummy netdev is supposed to be
5901 * only ever used for NAPI polls
5902 */
5903 memset(dev, 0, sizeof(struct net_device));
5904
5905 /* make sure we BUG if trying to hit standard
5906 * register/unregister code path
5907 */
5908 dev->reg_state = NETREG_DUMMY;
5909
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005910 /* NAPI wants this */
5911 INIT_LIST_HEAD(&dev->napi_list);
5912
5913 /* a dummy interface is started by default */
5914 set_bit(__LINK_STATE_PRESENT, &dev->state);
5915 set_bit(__LINK_STATE_START, &dev->state);
5916
Eric Dumazet29b44332010-10-11 10:22:12 +00005917	/* Note : We don't allocate pcpu_refcnt for dummy devices,
5918	 * because users of this 'device' don't need to change
5919 * its refcount.
5920 */
5921
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005922 return 0;
5923}
5924EXPORT_SYMBOL_GPL(init_dummy_netdev);
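
/*
 * Editor's sketch (not part of the original source): the intended use
 * is a driver embedding a dummy netdev purely as a NAPI anchor; the
 * structure and poll function names here are hypothetical:
 *
 *	struct my_hw {
 *		struct net_device napi_dev;
 *		struct napi_struct napi;
 *	} *hw;
 *
 *	init_dummy_netdev(&hw->napi_dev);
 *	netif_napi_add(&hw->napi_dev, &hw->napi, my_poll, 64);
 *	napi_enable(&hw->napi);
 */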
5925
5926
5927/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005928 * register_netdev - register a network device
5929 * @dev: device to register
5930 *
5931 * Take a completed network device structure and add it to the kernel
5932 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5933 * chain. 0 is returned on success. A negative errno code is returned
5934 * on a failure to set up the device, or if the name is a duplicate.
5935 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005936 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005937 * and expands the device name if you passed a format string to
5938 * alloc_netdev.
5939 */
5940int register_netdev(struct net_device *dev)
5941{
5942 int err;
5943
5944 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005945 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005946 rtnl_unlock();
5947 return err;
5948}
5949EXPORT_SYMBOL(register_netdev);
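
/*
 * Editor's sketch (not part of the original source): the usual
 * allocate/register pair for an Ethernet driver; "struct my_priv" is a
 * hypothetical private area, and "eth%d" is expanded at registration:
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "eth%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 */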
5950
Eric Dumazet29b44332010-10-11 10:22:12 +00005951int netdev_refcnt_read(const struct net_device *dev)
5952{
5953 int i, refcnt = 0;
5954
5955 for_each_possible_cpu(i)
5956 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5957 return refcnt;
5958}
5959EXPORT_SYMBOL(netdev_refcnt_read);
5960
Ben Hutchings2c530402012-07-10 10:55:09 +00005961/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005962 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00005963 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005964 *
5965 * This is called when unregistering network devices.
5966 *
5967 * Any protocol or device that holds a reference should register
5968 * for netdevice notification, and cleanup and put back the
5969 * reference if they receive an UNREGISTER event.
5970 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005971 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005972 */
5973static void netdev_wait_allrefs(struct net_device *dev)
5974{
5975 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00005976 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005977
Eric Dumazete014deb2009-11-17 05:59:21 +00005978 linkwatch_forget_dev(dev);
5979
Linus Torvalds1da177e2005-04-16 15:20:36 -07005980 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00005981 refcnt = netdev_refcnt_read(dev);
5982
5983 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005984 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005985 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005986
5987 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005988 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005989
Eric Dumazet748e2d92012-08-22 21:50:59 +00005990 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005991 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00005992 rtnl_lock();
5993
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005994 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005995 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5996 &dev->state)) {
5997 /* We must not have linkwatch events
5998 * pending on unregister. If this
5999 * happens, we simply run the queue
6000 * unscheduled, resulting in a noop
6001 * for this device.
6002 */
6003 linkwatch_run_queue();
6004 }
6005
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006006 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006007
6008 rebroadcast_time = jiffies;
6009 }
6010
6011 msleep(250);
6012
Eric Dumazet29b44332010-10-11 10:22:12 +00006013 refcnt = netdev_refcnt_read(dev);
6014
Linus Torvalds1da177e2005-04-16 15:20:36 -07006015 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006016 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6017 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006018 warning_time = jiffies;
6019 }
6020 }
6021}
6022
6023/* The sequence is:
6024 *
6025 * rtnl_lock();
6026 * ...
6027 * register_netdevice(x1);
6028 * register_netdevice(x2);
6029 * ...
6030 * unregister_netdevice(y1);
6031 * unregister_netdevice(y2);
6032 * ...
6033 * rtnl_unlock();
6034 * free_netdev(y1);
6035 * free_netdev(y2);
6036 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006037 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006038 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006039 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006040 * without deadlocking with linkwatch via keventd.
6041 * 2) Since we run with the RTNL semaphore not held, we can sleep
6042 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006043 *
6044 * We must not return until all unregister events added during
6045 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006046 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006047void netdev_run_todo(void)
6048{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006049 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006050
Linus Torvalds1da177e2005-04-16 15:20:36 -07006051 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006052 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07006053
6054 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006055
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006056
6057 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00006058 if (!list_empty(&list))
6059 rcu_barrier();
6060
Linus Torvalds1da177e2005-04-16 15:20:36 -07006061 while (!list_empty(&list)) {
6062 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00006063 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006064 list_del(&dev->todo_list);
6065
Eric Dumazet748e2d92012-08-22 21:50:59 +00006066 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006067 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00006068 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006069
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006070 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006071 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006072 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006073 dump_stack();
6074 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006075 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006076
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006077 dev->reg_state = NETREG_UNREGISTERED;
6078
Changli Gao152102c2010-03-30 20:16:22 +00006079 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07006080
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006081 netdev_wait_allrefs(dev);
6082
6083 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00006084 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00006085 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6086 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07006087 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006088
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006089 if (dev->destructor)
6090 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006091
Eric W. Biederman50624c92013-09-23 21:19:49 -07006092 /* Report a network device has been unregistered */
6093 rtnl_lock();
6094 dev_net(dev)->dev_unreg_count--;
6095 __rtnl_unlock();
6096 wake_up(&netdev_unregistering_wq);
6097
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006098 /* Free network device */
6099 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006100 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006101}
6102
Ben Hutchings3cfde792010-07-09 09:11:52 +00006103/* Convert net_device_stats to rtnl_link_stats64. They have the same
6104 * fields in the same order, with only the type differing.
6105 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006106void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6107 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00006108{
6109#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006110 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6111 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00006112#else
6113 size_t i, n = sizeof(*stats64) / sizeof(u64);
6114 const unsigned long *src = (const unsigned long *)netdev_stats;
6115 u64 *dst = (u64 *)stats64;
6116
6117 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6118 sizeof(*stats64) / sizeof(u64));
6119 for (i = 0; i < n; i++)
6120 dst[i] = src[i];
6121#endif
6122}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006123EXPORT_SYMBOL(netdev_stats_to_stats64);
Ben Hutchings3cfde792010-07-09 09:11:52 +00006124
Eric Dumazetd83345a2009-11-16 03:36:51 +00006125/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006126 * dev_get_stats - get network device statistics
6127 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07006128 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006129 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00006130 * Get network statistics from device. Return @storage.
6131 * The device driver may provide its own method by setting
6132 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6133 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006134 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00006135struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6136 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00006137{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006138 const struct net_device_ops *ops = dev->netdev_ops;
6139
Eric Dumazet28172732010-07-07 14:58:56 -07006140 if (ops->ndo_get_stats64) {
6141 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006142 ops->ndo_get_stats64(dev, storage);
6143 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00006144 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006145 } else {
6146 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07006147 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006148 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07006149 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07006150}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006151EXPORT_SYMBOL(dev_get_stats);
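
/*
 * Editor's sketch (not part of the original source): filling a
 * caller-provided buffer, typically under RTNL as rtnetlink does:
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: %llu rx packets\n", dev->name,
 *		(unsigned long long)stats.rx_packets);
 */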
Rusty Russellc45d2862007-03-28 14:29:08 -07006152
Eric Dumazet24824a02010-10-02 06:11:55 +00006153struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07006154{
Eric Dumazet24824a02010-10-02 06:11:55 +00006155 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07006156
Eric Dumazet24824a02010-10-02 06:11:55 +00006157#ifdef CONFIG_NET_CLS_ACT
6158 if (queue)
6159 return queue;
6160 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6161 if (!queue)
6162 return NULL;
6163 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00006164 queue->qdisc = &noop_qdisc;
6165 queue->qdisc_sleeping = &noop_qdisc;
6166 rcu_assign_pointer(dev->ingress_queue, queue);
6167#endif
6168 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07006169}
6170
Eric Dumazet2c60db02012-09-16 09:17:26 +00006171static const struct ethtool_ops default_ethtool_ops;
6172
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00006173void netdev_set_default_ethtool_ops(struct net_device *dev,
6174 const struct ethtool_ops *ops)
6175{
6176 if (dev->ethtool_ops == &default_ethtool_ops)
6177 dev->ethtool_ops = ops;
6178}
6179EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6180
Eric Dumazet74d332c2013-10-30 13:10:44 -07006181void netdev_freemem(struct net_device *dev)
6182{
6183 char *addr = (char *)dev - dev->padded;
6184
6185 if (is_vmalloc_addr(addr))
6186 vfree(addr);
6187 else
6188 kfree(addr);
6189}
6190
Linus Torvalds1da177e2005-04-16 15:20:36 -07006191/**
Tom Herbert36909ea2011-01-09 19:36:31 +00006192 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006193 * @sizeof_priv: size of private data to allocate space for
6194 * @name: device name format string
6195 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00006196 * @txqs: the number of TX subqueues to allocate
6197 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07006198 *
6199 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07006200 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00006201 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006202 */
Tom Herbert36909ea2011-01-09 19:36:31 +00006203struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6204 void (*setup)(struct net_device *),
6205 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006206{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006207 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07006208 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006209 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006210
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07006211 BUG_ON(strlen(name) >= sizeof(dev->name));
6212
Tom Herbert36909ea2011-01-09 19:36:31 +00006213 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006214 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00006215 return NULL;
6216 }
6217
Tom Herbert36909ea2011-01-09 19:36:31 +00006218#ifdef CONFIG_RPS
6219 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006220 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00006221 return NULL;
6222 }
6223#endif
6224
David S. Millerfd2ea0a2008-07-17 01:56:23 -07006225 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006226 if (sizeof_priv) {
6227 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006228 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006229 alloc_size += sizeof_priv;
6230 }
6231 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006232 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006233
Eric Dumazet74d332c2013-10-30 13:10:44 -07006234 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6235 if (!p)
6236 p = vzalloc(alloc_size);
Joe Perches62b59422013-02-04 16:48:16 +00006237 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006238 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006239
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006240 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006241 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006242
Eric Dumazet29b44332010-10-11 10:22:12 +00006243 dev->pcpu_refcnt = alloc_percpu(int);
6244 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07006245 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006246
Linus Torvalds1da177e2005-04-16 15:20:36 -07006247 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00006248 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006249
Jiri Pirko22bedad32010-04-01 21:22:57 +00006250 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006251 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00006252
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006253 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006254
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07006255 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00006256 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006257
Herbert Xud565b0a2008-12-15 23:38:52 -08006258 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006259 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006260 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00006261 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006262 INIT_LIST_HEAD(&dev->adj_list.upper);
6263 INIT_LIST_HEAD(&dev->adj_list.lower);
6264 INIT_LIST_HEAD(&dev->all_adj_list.upper);
6265 INIT_LIST_HEAD(&dev->all_adj_list.lower);
Eric Dumazet93f154b2009-05-18 22:19:19 -07006266 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006267 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006268
6269 dev->num_tx_queues = txqs;
6270 dev->real_num_tx_queues = txqs;
6271 if (netif_alloc_netdev_queues(dev))
6272 goto free_all;
6273
6274#ifdef CONFIG_RPS
6275 dev->num_rx_queues = rxqs;
6276 dev->real_num_rx_queues = rxqs;
6277 if (netif_alloc_rx_queues(dev))
6278 goto free_all;
6279#endif
6280
Linus Torvalds1da177e2005-04-16 15:20:36 -07006281 strcpy(dev->name, name);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006282 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00006283 if (!dev->ethtool_ops)
6284 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006285 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006286
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006287free_all:
6288 free_netdev(dev);
6289 return NULL;
6290
Eric Dumazet29b44332010-10-11 10:22:12 +00006291free_pcpu:
6292 free_percpu(dev->pcpu_refcnt);
Eric Dumazet60877a32013-06-20 01:15:51 -07006293 netif_free_tx_queues(dev);
Tom Herbertfe822242010-11-09 10:47:38 +00006294#ifdef CONFIG_RPS
6295 kfree(dev->_rx);
6296#endif
6297
Eric Dumazet74d332c2013-10-30 13:10:44 -07006298free_dev:
6299 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006300 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006301}
Tom Herbert36909ea2011-01-09 19:36:31 +00006302EXPORT_SYMBOL(alloc_netdev_mqs);
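
/* Usage sketch (illustrative, not part of dev.c): allocating a four-TX/
 * four-RX-queue Ethernet device with no private area, then releasing it
 * with free_netdev() if registration fails. ether_setup() is the stock
 * Ethernet initializer from <linux/etherdevice.h>; the example_ name is
 * hypothetical.
 */
static struct net_device *example_create_netdev(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mqs(0, "eth%d", ether_setup, 4, 4);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		/* error path: free_netdev() handles NETREG_UNINITIALIZED */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}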
Linus Torvalds1da177e2005-04-16 15:20:36 -07006303
6304/**
6305 * free_netdev - free network device
6306 * @dev: device
6307 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006308 * This function does the last stage of destroying an allocated device
6309 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006310 * If this is the last reference then it will be freed.
6311 */
6312void free_netdev(struct net_device *dev)
6313{
Herbert Xud565b0a2008-12-15 23:38:52 -08006314 struct napi_struct *p, *n;
6315
Denis V. Lunevf3005d72008-04-16 02:02:18 -07006316 release_net(dev_net(dev));
6317
Eric Dumazet60877a32013-06-20 01:15:51 -07006318 netif_free_tx_queues(dev);
Tom Herbertfe822242010-11-09 10:47:38 +00006319#ifdef CONFIG_RPS
6320 kfree(dev->_rx);
6321#endif
David S. Millere8a04642008-07-17 00:34:19 -07006322
Eric Dumazet33d480c2011-08-11 19:30:52 +00006323 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00006324
Jiri Pirkof001fde2009-05-05 02:48:28 +00006325 /* Flush device addresses */
6326 dev_addr_flush(dev);
6327
Herbert Xud565b0a2008-12-15 23:38:52 -08006328 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6329 netif_napi_del(p);
6330
Eric Dumazet29b44332010-10-11 10:22:12 +00006331 free_percpu(dev->pcpu_refcnt);
6332 dev->pcpu_refcnt = NULL;
6333
Stephen Hemminger3041a062006-05-26 13:25:24 -07006334 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006335 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07006336 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006337 return;
6338 }
6339
6340 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6341 dev->reg_state = NETREG_RELEASED;
6342
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07006343 /* will free via device release */
6344 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006345}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006346EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006347
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006348/**
6349 * synchronize_net - Synchronize with packet receive processing
6350 *
6351 * Wait for packets currently being received to be done.
6352 * Does not block later packets from starting.
6353 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006354void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006355{
6356 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00006357 if (rtnl_is_locked())
6358 synchronize_rcu_expedited();
6359 else
6360 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006361}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006362EXPORT_SYMBOL(synchronize_net);
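
/* Usage sketch (illustrative, not part of dev.c): the unpublish-then-wait
 * pattern synchronize_net() exists for. Once it returns, no receive-path
 * reader can still hold the old pointer, so it is safe to free. The
 * example_state type and the RCU-protected slot are hypothetical.
 */
struct example_state {
	int refreshed;
};

static void example_unpublish(struct example_state __rcu **slot)
{
	struct example_state *old = rcu_dereference_protected(*slot, 1);

	RCU_INIT_POINTER(*slot, NULL);
	synchronize_net();		/* wait out in-flight readers */
	kfree(old);
}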
Linus Torvalds1da177e2005-04-16 15:20:36 -07006363
6364/**
Eric Dumazet44a08732009-10-27 07:03:04 +00006365 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07006366 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00006367 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08006368 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07006369 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006370 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00006371 * If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006372 *
6373 * Callers must hold the rtnl semaphore. You may want
6374 * unregister_netdev() instead of this.
6375 */
6376
Eric Dumazet44a08732009-10-27 07:03:04 +00006377void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006378{
Herbert Xua6620712007-12-12 19:21:56 -08006379 ASSERT_RTNL();
6380
Eric Dumazet44a08732009-10-27 07:03:04 +00006381 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006382 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00006383 } else {
6384 rollback_registered(dev);
6385 /* Finish processing unregister after unlock */
6386 net_set_todo(dev);
6387 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006388}
Eric Dumazet44a08732009-10-27 07:03:04 +00006389EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006390
6391/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006392 * unregister_netdevice_many - unregister many devices
6393 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006394 */
6395void unregister_netdevice_many(struct list_head *head)
6396{
6397 struct net_device *dev;
6398
6399 if (!list_empty(head)) {
6400 rollback_registered_many(head);
6401 list_for_each_entry(dev, head, unreg_list)
6402 net_set_todo(dev);
6403 }
6404}
Eric Dumazet63c80992009-10-27 07:06:49 +00006405EXPORT_SYMBOL(unregister_netdevice_many);
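
/* Usage sketch (illustrative, not part of dev.c): queueing a whole
 * namespace's devices for one batched unregister, the same pattern
 * rtnl_link_ops->dellink() implementations and default_device_exit_batch()
 * below use. Batching amortizes the expensive synchronization in
 * rollback_registered_many() across every device on the list.
 */
static void example_kill_netns_devices(struct net *net)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(kill_list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, tmp)
		unregister_netdevice_queue(dev, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}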
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006406
6407/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006408 * unregister_netdev - remove device from the kernel
6409 * @dev: device
6410 *
6411 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006412 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006413 *
6414 * This is just a wrapper for unregister_netdevice that takes
6415 * the rtnl semaphore. In general you want to use this and not
6416 * unregister_netdevice.
6417 */
6418void unregister_netdev(struct net_device *dev)
6419{
6420 rtnl_lock();
6421 unregister_netdevice(dev);
6422 rtnl_unlock();
6423}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006424EXPORT_SYMBOL(unregister_netdev);
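
/* Usage sketch (illustrative, not part of dev.c): the usual module-exit
 * teardown for a driver owning a single device. unregister_netdev() takes
 * and releases the rtnl semaphore itself; free_netdev() then drops the
 * final reference. "example_dev" is a hypothetical module-global.
 */
static struct net_device *example_dev;

static void __exit example_module_exit(void)
{
	unregister_netdev(example_dev);
	free_netdev(example_dev);
}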
6425
Eric W. Biedermance286d32007-09-12 13:53:49 +02006426/**
 6427 * dev_change_net_namespace - move device to a different network namespace
6428 * @dev: device
6429 * @net: network namespace
 6430 * @pat: If not NULL, name pattern to try if the current device name
6431 * is already taken in the destination network namespace.
6432 *
6433 * This function shuts down a device interface and moves it
6434 * to a new network namespace. On success 0 is returned, on
 6435 * a failure a negative errno code is returned.
6436 *
6437 * Callers must hold the rtnl semaphore.
6438 */
6439
6440int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6441{
Eric W. Biedermance286d32007-09-12 13:53:49 +02006442 int err;
6443
6444 ASSERT_RTNL();
6445
6446 /* Don't allow namespace local devices to be moved. */
6447 err = -EINVAL;
6448 if (dev->features & NETIF_F_NETNS_LOCAL)
6449 goto out;
6450
 6451 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02006452 if (dev->reg_state != NETREG_REGISTERED)
6453 goto out;
6454
 6455 /* Get out if there is nothing to do */
6456 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09006457 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02006458 goto out;
6459
6460 /* Pick the destination device name, and ensure
6461 * we can use it in the destination network namespace.
6462 */
6463 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00006464 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006465 /* We get here if we can't use the current device name */
6466 if (!pat)
6467 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00006468 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006469 goto out;
6470 }
6471
6472 /*
 6473 * And now a mini version of register_netdevice/unregister_netdevice.
6474 */
6475
6476 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07006477 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006478
6479 /* And unlink it from device chain */
6480 err = -ENODEV;
6481 unlist_netdevice(dev);
6482
6483 synchronize_net();
6484
6485 /* Shutdown queueing discipline. */
6486 dev_shutdown(dev);
6487
 6488 /* Notify protocols that we are about to destroy
 6489 this device. They should clean up all their state.
David Lamparter3b27e102010-09-17 03:22:19 +00006490
6491 Note that dev->reg_state stays at NETREG_REGISTERED.
 6492 This is intentional: this way 8021q and macvlan know
6493 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02006494 */
6495 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00006496 rcu_barrier();
6497 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006498 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006499
6500 /*
6501 * Flush the unicast and multicast chains
6502 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006503 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006504 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006505
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006506 /* Send a netdev-removed uevent to the old namespace */
6507 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6508
Eric W. Biedermance286d32007-09-12 13:53:49 +02006509 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006510 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006511
Eric W. Biedermance286d32007-09-12 13:53:49 +02006512 /* If there is an ifindex conflict assign a new one */
6513 if (__dev_get_by_index(net, dev->ifindex)) {
6514 int iflink = (dev->iflink == dev->ifindex);
6515 dev->ifindex = dev_new_index(net);
6516 if (iflink)
6517 dev->iflink = dev->ifindex;
6518 }
6519
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006520 /* Send a netdev-add uevent to the new namespace */
6521 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6522
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006523 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07006524 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006525 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006526
6527 /* Add the device back in the hashes */
6528 list_netdevice(dev);
6529
6530 /* Notify protocols, that a new device appeared. */
6531 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6532
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006533 /*
6534 * Prevent userspace races by waiting until the network
6535 * device is fully setup before sending notifications.
6536 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006537 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006538
Eric W. Biedermance286d32007-09-12 13:53:49 +02006539 synchronize_net();
6540 err = 0;
6541out:
6542 return err;
6543}
Johannes Berg463d0182009-07-14 00:33:35 +02006544EXPORT_SYMBOL_GPL(dev_change_net_namespace);
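
/* Usage sketch (illustrative, not part of dev.c): moving a device into
 * another namespace under rtnl, using "eth%d" as the fallback pattern in
 * case dev->name is already taken there. "target" is hypothetical.
 */
static int example_move_to_netns(struct net_device *dev, struct net *target)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target, "eth%d");
	rtnl_unlock();
	return err;
}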
Eric W. Biedermance286d32007-09-12 13:53:49 +02006545
Linus Torvalds1da177e2005-04-16 15:20:36 -07006546static int dev_cpu_callback(struct notifier_block *nfb,
6547 unsigned long action,
6548 void *ocpu)
6549{
6550 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006551 struct sk_buff *skb;
6552 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6553 struct softnet_data *sd, *oldsd;
6554
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006555 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006556 return NOTIFY_OK;
6557
6558 local_irq_disable();
6559 cpu = smp_processor_id();
6560 sd = &per_cpu(softnet_data, cpu);
6561 oldsd = &per_cpu(softnet_data, oldcpu);
6562
6563 /* Find end of our completion_queue. */
6564 list_skb = &sd->completion_queue;
6565 while (*list_skb)
6566 list_skb = &(*list_skb)->next;
6567 /* Append completion queue from offline CPU. */
6568 *list_skb = oldsd->completion_queue;
6569 oldsd->completion_queue = NULL;
6570
Linus Torvalds1da177e2005-04-16 15:20:36 -07006571 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00006572 if (oldsd->output_queue) {
6573 *sd->output_queue_tailp = oldsd->output_queue;
6574 sd->output_queue_tailp = oldsd->output_queue_tailp;
6575 oldsd->output_queue = NULL;
6576 oldsd->output_queue_tailp = &oldsd->output_queue;
6577 }
Heiko Carstens264524d2011-06-06 20:50:03 +00006578 /* Append NAPI poll list from offline CPU. */
6579 if (!list_empty(&oldsd->poll_list)) {
6580 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6581 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6582 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006583
6584 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6585 local_irq_enable();
6586
6587 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00006588 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6589 netif_rx(skb);
6590 input_queue_head_incr(oldsd);
6591 }
Tom Herbertfec5e652010-04-16 16:01:27 -07006592 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006593 netif_rx(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00006594 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07006595 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006596
6597 return NOTIFY_OK;
6598}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006599
6600
Herbert Xu7f353bf2007-08-10 15:47:58 -07006601/**
Herbert Xub63365a2008-10-23 01:11:29 -07006602 * netdev_increment_features - increment feature set by one
6603 * @all: current feature set
6604 * @one: new feature set
6605 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07006606 *
6607 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07006608 * @one to the master device with current feature set @all. Will not
6609 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07006610 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006611netdev_features_t netdev_increment_features(netdev_features_t all,
6612 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07006613{
Michał Mirosław1742f182011-04-22 06:31:16 +00006614 if (mask & NETIF_F_GEN_CSUM)
6615 mask |= NETIF_F_ALL_CSUM;
6616 mask |= NETIF_F_VLAN_CHALLENGED;
6617
6618 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6619 all &= one | ~NETIF_F_ALL_FOR_ALL;
6620
Michał Mirosław1742f182011-04-22 06:31:16 +00006621 /* If one device supports hw checksumming, set for all. */
6622 if (all & NETIF_F_GEN_CSUM)
6623 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006624
6625 return all;
6626}
Herbert Xub63365a2008-10-23 01:11:29 -07006627EXPORT_SYMBOL(netdev_increment_features);
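
/* Usage sketch (illustrative, not part of dev.c): how a master driver such
 * as bonding might fold every slave's feature set into its own, starting
 * from the full mask so NETIF_F_ALL_FOR_ALL bits get ANDed across slaves.
 * A simplified take on bond_compute_features(); the array is hypothetical.
 */
static netdev_features_t example_master_features(struct net_device *slave[],
						 int n_slaves,
						 netdev_features_t mask)
{
	netdev_features_t all = mask;
	int i;

	for (i = 0; i < n_slaves; i++)
		all = netdev_increment_features(all, slave[i]->features, mask);
	return all;
}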
Herbert Xu7f353bf2007-08-10 15:47:58 -07006628
Baruch Siach430f03c2013-06-02 20:43:55 +00006629static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006630{
6631 int i;
6632 struct hlist_head *hash;
6633
6634 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6635 if (hash != NULL)
6636 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6637 INIT_HLIST_HEAD(&hash[i]);
6638
6639 return hash;
6640}
6641
Eric W. Biederman881d9662007-09-17 11:56:21 -07006642/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07006643static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006644{
Rustad, Mark D734b6542012-07-18 09:06:07 +00006645 if (net != &init_net)
6646 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07006647
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006648 net->dev_name_head = netdev_create_hash();
6649 if (net->dev_name_head == NULL)
6650 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006651
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006652 net->dev_index_head = netdev_create_hash();
6653 if (net->dev_index_head == NULL)
6654 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006655
6656 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006657
6658err_idx:
6659 kfree(net->dev_name_head);
6660err_name:
6661 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006662}
6663
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006664/**
6665 * netdev_drivername - network driver for the device
6666 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006667 *
6668 * Determine network driver for device.
6669 */
David S. Miller3019de12011-06-06 16:41:33 -07006670const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07006671{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07006672 const struct device_driver *driver;
6673 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07006674 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07006675
6676 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006677 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07006678 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006679
6680 driver = parent->driver;
6681 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07006682 return driver->name;
6683 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006684}
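
/* Usage sketch (illustrative, not part of dev.c): diagnostic paths use
 * netdev_drivername() to name the driver behind a misbehaving device,
 * much like the TX-timeout warning in the qdisc watchdog.
 */
static void example_report_tx_timeout(struct net_device *dev)
{
	netdev_err(dev, "transmit queue timed out (driver %s)\n",
		   netdev_drivername(dev));
}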
6685
Joe Perchesb004ff42012-09-12 20:12:19 -07006686static int __netdev_printk(const char *level, const struct net_device *dev,
Joe Perches256df2f2010-06-27 01:02:35 +00006687 struct va_format *vaf)
6688{
6689 int r;
6690
Joe Perchesb004ff42012-09-12 20:12:19 -07006691 if (dev && dev->dev.parent) {
Joe Perches666f3552012-09-12 20:14:11 -07006692 r = dev_printk_emit(level[1] - '0',
6693 dev->dev.parent,
6694 "%s %s %s: %pV",
6695 dev_driver_string(dev->dev.parent),
6696 dev_name(dev->dev.parent),
6697 netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006698 } else if (dev) {
Joe Perches256df2f2010-06-27 01:02:35 +00006699 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006700 } else {
Joe Perches256df2f2010-06-27 01:02:35 +00006701 r = printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006702 }
Joe Perches256df2f2010-06-27 01:02:35 +00006703
6704 return r;
6705}
6706
6707int netdev_printk(const char *level, const struct net_device *dev,
6708 const char *format, ...)
6709{
6710 struct va_format vaf;
6711 va_list args;
6712 int r;
6713
6714 va_start(args, format);
6715
6716 vaf.fmt = format;
6717 vaf.va = &args;
6718
6719 r = __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006720
Joe Perches256df2f2010-06-27 01:02:35 +00006721 va_end(args);
6722
6723 return r;
6724}
6725EXPORT_SYMBOL(netdev_printk);
6726
6727#define define_netdev_printk_level(func, level) \
6728int func(const struct net_device *dev, const char *fmt, ...) \
6729{ \
6730 int r; \
6731 struct va_format vaf; \
6732 va_list args; \
6733 \
6734 va_start(args, fmt); \
6735 \
6736 vaf.fmt = fmt; \
6737 vaf.va = &args; \
6738 \
6739 r = __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07006740 \
Joe Perches256df2f2010-06-27 01:02:35 +00006741 va_end(args); \
6742 \
6743 return r; \
6744} \
6745EXPORT_SYMBOL(func);
6746
6747define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6748define_netdev_printk_level(netdev_alert, KERN_ALERT);
6749define_netdev_printk_level(netdev_crit, KERN_CRIT);
6750define_netdev_printk_level(netdev_err, KERN_ERR);
6751define_netdev_printk_level(netdev_warn, KERN_WARNING);
6752define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6753define_netdev_printk_level(netdev_info, KERN_INFO);
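
/* Usage sketch (illustrative, not part of dev.c): drivers normally call
 * these level wrappers rather than netdev_printk() directly, so every
 * message carries the bus, driver and interface prefix. "speed" is a
 * hypothetical parameter.
 */
static void example_report_link(struct net_device *dev, bool up, u32 speed)
{
	if (up)
		netdev_info(dev, "link up, %u Mb/s\n", speed);
	else
		netdev_warn(dev, "link down\n");
}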
6754
Pavel Emelyanov46650792007-10-08 20:38:39 -07006755static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006756{
6757 kfree(net->dev_name_head);
6758 kfree(net->dev_index_head);
6759}
6760
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006761static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07006762 .init = netdev_init,
6763 .exit = netdev_exit,
6764};
6765
Pavel Emelyanov46650792007-10-08 20:38:39 -07006766static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006767{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006768 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02006769 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006770 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02006771 * initial network namespace
6772 */
6773 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006774 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006775 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006776 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02006777
 6778 /* Ignore unmovable devices (e.g. loopback) */
6779 if (dev->features & NETIF_F_NETNS_LOCAL)
6780 continue;
6781
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006782 /* Leave virtual devices for the generic cleanup */
6783 if (dev->rtnl_link_ops)
6784 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08006785
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006786 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006787 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6788 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006789 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006790 pr_emerg("%s: failed to move %s to init_net: %d\n",
6791 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006792 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02006793 }
6794 }
6795 rtnl_unlock();
6796}
6797
Eric W. Biederman50624c92013-09-23 21:19:49 -07006798static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
6799{
6800 /* Return with the rtnl_lock held when there are no network
6801 * devices unregistering in any network namespace in net_list.
6802 */
6803 struct net *net;
6804 bool unregistering;
6805 DEFINE_WAIT(wait);
6806
6807 for (;;) {
6808 prepare_to_wait(&netdev_unregistering_wq, &wait,
6809 TASK_UNINTERRUPTIBLE);
6810 unregistering = false;
6811 rtnl_lock();
6812 list_for_each_entry(net, net_list, exit_list) {
6813 if (net->dev_unreg_count > 0) {
6814 unregistering = true;
6815 break;
6816 }
6817 }
6818 if (!unregistering)
6819 break;
6820 __rtnl_unlock();
6821 schedule();
6822 }
6823 finish_wait(&netdev_unregistering_wq, &wait);
6824}
6825
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006826static void __net_exit default_device_exit_batch(struct list_head *net_list)
6827{
 6828 /* At exit, all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04006829 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006830 * Do this across as many network namespaces as possible to
6831 * improve batching efficiency.
6832 */
6833 struct net_device *dev;
6834 struct net *net;
6835 LIST_HEAD(dev_kill_list);
6836
Eric W. Biederman50624c92013-09-23 21:19:49 -07006837 /* To prevent network device cleanup code from dereferencing
6838 * loopback devices or network devices that have been freed
6839 * wait here for all pending unregistrations to complete,
 6840 * before unregistering the loopback device and allowing the
 6841 * network namespace to be freed.
6842 *
 6843 * The netdev todo list containing all network device
6844 * unregistrations that happen in default_device_exit_batch
6845 * will run in the rtnl_unlock() at the end of
6846 * default_device_exit_batch.
6847 */
6848 rtnl_lock_unregistering(net_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006849 list_for_each_entry(net, net_list, exit_list) {
6850 for_each_netdev_reverse(net, dev) {
6851 if (dev->rtnl_link_ops)
6852 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6853 else
6854 unregister_netdevice_queue(dev, &dev_kill_list);
6855 }
6856 }
6857 unregister_netdevice_many(&dev_kill_list);
Eric Dumazetceaaec92011-02-17 22:59:19 +00006858 list_del(&dev_kill_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006859 rtnl_unlock();
6860}
6861
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006862static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006863 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006864 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02006865};
6866
Linus Torvalds1da177e2005-04-16 15:20:36 -07006867/*
6868 * Initialize the DEV module. At boot time this walks the device list and
6869 * unhooks any devices that fail to initialise (normally hardware not
6870 * present) and leaves us with a valid list of present and active devices.
6871 *
6872 */
6873
6874/*
6875 * This is called single threaded during boot, so no need
6876 * to take the rtnl semaphore.
6877 */
6878static int __init net_dev_init(void)
6879{
6880 int i, rc = -ENOMEM;
6881
6882 BUG_ON(!dev_boot_phase);
6883
Linus Torvalds1da177e2005-04-16 15:20:36 -07006884 if (dev_proc_init())
6885 goto out;
6886
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006887 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07006888 goto out;
6889
6890 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08006891 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006892 INIT_LIST_HEAD(&ptype_base[i]);
6893
Vlad Yasevich62532da2012-11-15 08:49:10 +00006894 INIT_LIST_HEAD(&offload_base);
6895
Eric W. Biederman881d9662007-09-17 11:56:21 -07006896 if (register_pernet_subsys(&netdev_net_ops))
6897 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006898
6899 /*
6900 * Initialise the packet receive queues.
6901 */
6902
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07006903 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006904 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006905
Changli Gaodee42872010-05-02 05:42:16 +00006906 memset(sd, 0, sizeof(*sd));
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006907 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07006908 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006909 sd->completion_queue = NULL;
6910 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00006911 sd->output_queue = NULL;
6912 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00006913#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006914 sd->csd.func = rps_trigger_softirq;
6915 sd->csd.info = sd;
6916 sd->csd.flags = 0;
6917 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07006918#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00006919
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006920 sd->backlog.poll = process_backlog;
6921 sd->backlog.weight = weight_p;
6922 sd->backlog.gro_list = NULL;
6923 sd->backlog.gro_count = 0;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00006924
6925#ifdef CONFIG_NET_FLOW_LIMIT
6926 sd->flow_limit = NULL;
6927#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006928 }
6929
Linus Torvalds1da177e2005-04-16 15:20:36 -07006930 dev_boot_phase = 0;
6931
Eric W. Biederman505d4f72008-11-07 22:54:20 -08006932 /* The loopback device is special: if any other network device
 6933 * is present in a network namespace, the loopback device must
 6934 * be present too. Since we now dynamically allocate and free
 6935 * the loopback device, ensure this invariant is maintained by
 6936 * keeping the loopback device as the first device on the
 6937 * list of network devices, ensuring the loopback device
 6938 * is the first device that appears and the last network device
 6939 * that disappears.
6940 */
6941 if (register_pernet_device(&loopback_net_ops))
6942 goto out;
6943
6944 if (register_pernet_device(&default_device_ops))
6945 goto out;
6946
Carlos R. Mafra962cf362008-05-15 11:15:37 -03006947 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6948 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006949
6950 hotcpu_notifier(dev_cpu_callback, 0);
6951 dst_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006952 rc = 0;
6953out:
6954 return rc;
6955}
6956
6957subsys_initcall(net_dev_init);