/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
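
/*
 * Illustrative sketch (not from this file): a pure reader walking the
 * device list under RCU, per the locking rules above; the loop body is
 * a hypothetical example.
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		pr_info("considering %s\n", dev->name);
 *	rcu_read_unlock();
 */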

seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs in the middle of receiving packets will see the new packet
 *	type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
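
/*
 * Illustrative sketch (not from this file): how a module might register
 * an ETH_P_ALL tap with the handlers above. "my_tap_rcv" and "my_tap"
 * are hypothetical names.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);
 *	...
 *	dev_remove_pack(&my_tap);	(safe to free once this returns)
 */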

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs in the middle of receiving packets will see the new offload
 *	handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
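
/*
 * Illustrative sketch (not from this file): a protocol registering
 * GRO/GSO callbacks. "myproto_*" and ETH_P_MYPROTO are hypothetical;
 * the callback field names follow struct packet_offload as of this
 * kernel vintage.
 *
 *	static struct packet_offload myproto_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_MYPROTO),
 *		.callbacks = {
 *			.gso_segment  = myproto_gso_segment,
 *			.gro_receive  = myproto_gro_receive,
 *			.gro_complete = myproto_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&myproto_offload);
 */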

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds a new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
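
/*
 * Illustrative example (not from this file): the boot parameter parsed
 * above takes up to four integers followed by a device name, e.g.
 * (values hypothetical):
 *
 *	netdev=9,0x300,0,0,eth1
 *
 * which records irq=9 and base_addr=0x300 for the device named "eth1".
 */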

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
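
/*
 * Illustrative sketch (not from this file): the refcounted lookup
 * pattern described above; "eth0" is a hypothetical name.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		... use dev; it cannot go away under us ...
 *		dev_put(dev);
 *	}
 */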

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
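
/*
 * Illustrative sketch (not from this file): an RCU-protected MAC
 * lookup; the address bytes are hypothetical.
 *
 *	static const char mac[ETH_ALEN] = {
 *		0x00, 0x16, 0x3e, 0x00, 0x00, 0x01
 *	};
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		... use dev without sleeping; no reference is taken ...
 *	rcu_read_unlock();
 */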

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
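
/*
 * Illustrative examples (not from this file) of the rules above:
 * "eth0" and "br0" pass; "" (empty), ".", "..", "a/b", names containing
 * whitespace, and names of IFNAMSIZ or more characters are rejected.
 */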

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
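
/*
 * Illustrative sketch (not from this file): a driver asking for the
 * next free "eth%d" slot before registration; error handling elided.
 *
 *	ret = dev_alloc_name(dev, "eth%d");
 *	if (ret < 0)
 *		goto err;
 *	... dev->name now holds e.g. "eth2", and ret == 2 ...
 */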

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
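
/*
 * Illustrative sketch (not from this file): renaming a device under
 * RTNL, using a "%d" wildcard so the kernel picks the unit number.
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "lan%d");
 *	rtnl_unlock();
 */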

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from @alias
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001186
Patrick McHardybd380812010-02-26 06:34:53 +00001187static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001189 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001190 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001192 ASSERT_RTNL();
1193
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194 if (!netif_device_present(dev))
1195 return -ENODEV;
1196
Neil Hormanca99ca12013-02-05 08:05:43 +00001197 /* Block netpoll from trying to do any rx path servicing.
1198 * If we don't do this there is a chance ndo_poll_controller
1199 * or ndo_poll may be running while we open the device
1200 */
dingtianhongda6e3782013-05-27 19:53:31 +00001201 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001202
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001203 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1204 ret = notifier_to_errno(ret);
1205 if (ret)
1206 return ret;
1207
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001209
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001210 if (ops->ndo_validate_addr)
1211 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001212
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001213 if (!ret && ops->ndo_open)
1214 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215
Neil Hormanca99ca12013-02-05 08:05:43 +00001216 netpoll_rx_enable(dev);
1217
Jeff Garzikbada3392007-10-23 20:19:37 -07001218 if (ret)
1219 clear_bit(__LINK_STATE_START, &dev->state);
1220 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 dev->flags |= IFF_UP;
David S. Millerb4bd07c2009-02-06 22:06:43 -08001222 net_dmaengine_get();
Patrick McHardy4417da62007-06-27 01:28:10 -07001223 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001225 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001227
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228 return ret;
1229}
Patrick McHardybd380812010-02-26 06:34:53 +00001230
1231/**
1232 * dev_open - prepare an interface for use.
1233 * @dev: device to open
1234 *
1235 * Takes a device from down to up state. The device's private open
1236 * function is invoked and then the multicast lists are loaded. Finally
1237 * the device is moved into the up state and a %NETDEV_UP message is
1238 * sent to the netdev notifier chain.
1239 *
1240 * Calling this function on an active interface is a nop. On a failure
1241 * a negative errno code is returned.
1242 */
1243int dev_open(struct net_device *dev)
1244{
1245 int ret;
1246
Patrick McHardybd380812010-02-26 06:34:53 +00001247 if (dev->flags & IFF_UP)
1248 return 0;
1249
Patrick McHardybd380812010-02-26 06:34:53 +00001250 ret = __dev_open(dev);
1251 if (ret < 0)
1252 return ret;
1253
Patrick McHardybd380812010-02-26 06:34:53 +00001254 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1255 call_netdevice_notifiers(NETDEV_UP, dev);
1256
1257 return ret;
1258}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001259EXPORT_SYMBOL(dev_open);
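/* Example (sketch, hypothetical names): bringing an interface up from
 * kernel code. __dev_open() asserts the RTNL, so a caller outside the
 * rtnetlink path must take rtnl_lock() around the call.
 *
 *	static int my_bring_up(struct net_device *slave_dev)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_open(slave_dev);
 *		rtnl_unlock();
 *		return err;
 *	}
 */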
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
Octavian Purdila44345722010-12-13 12:44:07 +00001261static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262{
Octavian Purdila44345722010-12-13 12:44:07 +00001263 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001264
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001265 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001266 might_sleep();
1267
Octavian Purdila44345722010-12-13 12:44:07 +00001268 list_for_each_entry(dev, head, unreg_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001269 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270
Octavian Purdila44345722010-12-13 12:44:07 +00001271 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272
Octavian Purdila44345722010-12-13 12:44:07 +00001273 /* Synchronize to scheduled poll. We cannot touch the poll list; it
 1274 * may even be on a different cpu. So just clear netif_running().
 1275 *
 1276 * dev->stop() will invoke napi_disable() on all of its
1277 * napi_struct instances on this device.
1278 */
1279 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1280 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281
Octavian Purdila44345722010-12-13 12:44:07 +00001282 dev_deactivate_many(head);
1283
1284 list_for_each_entry(dev, head, unreg_list) {
1285 const struct net_device_ops *ops = dev->netdev_ops;
1286
1287 /*
 1288 * Call the device-specific close. This cannot fail, and is
 1289 * only invoked if the device is UP.
1290 *
1291 * We allow it to be called even after a DETACH hot-plug
1292 * event.
1293 */
1294 if (ops->ndo_stop)
1295 ops->ndo_stop(dev);
1296
Octavian Purdila44345722010-12-13 12:44:07 +00001297 dev->flags &= ~IFF_UP;
Octavian Purdila44345722010-12-13 12:44:07 +00001298 net_dmaengine_put();
1299 }
1300
1301 return 0;
1302}
1303
1304static int __dev_close(struct net_device *dev)
1305{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001306 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001307 LIST_HEAD(single);
1308
Neil Hormanca99ca12013-02-05 08:05:43 +00001309 /* Temporarily disable netpoll until the interface is down */
dingtianhongda6e3782013-05-27 19:53:31 +00001310 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001311
Octavian Purdila44345722010-12-13 12:44:07 +00001312 list_add(&dev->unreg_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001313 retval = __dev_close_many(&single);
1314 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001315
1316 netpoll_rx_enable(dev);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001317 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001318}
1319
Eric Dumazet3fbd8752011-01-19 21:23:22 +00001320static int dev_close_many(struct list_head *head)
Octavian Purdila44345722010-12-13 12:44:07 +00001321{
1322 struct net_device *dev, *tmp;
1323 LIST_HEAD(tmp_list);
1324
1325 list_for_each_entry_safe(dev, tmp, head, unreg_list)
1326 if (!(dev->flags & IFF_UP))
1327 list_move(&dev->unreg_list, &tmp_list);
1328
1329 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001330
Octavian Purdila44345722010-12-13 12:44:07 +00001331 list_for_each_entry(dev, head, unreg_list) {
1332 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1333 call_netdevice_notifiers(NETDEV_DOWN, dev);
1334 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335
Octavian Purdila44345722010-12-13 12:44:07 +00001336 /* rollback_registered_many needs the complete original list */
1337 list_splice(&tmp_list, head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 return 0;
1339}
Patrick McHardybd380812010-02-26 06:34:53 +00001340
1341/**
1342 * dev_close - shutdown an interface.
1343 * @dev: device to shutdown
1344 *
1345 * This function moves an active device into down state. A
1346 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1347 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1348 * chain.
1349 */
1350int dev_close(struct net_device *dev)
1351{
Eric Dumazete14a5992011-05-10 12:26:06 -07001352 if (dev->flags & IFF_UP) {
1353 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001354
Neil Hormanca99ca12013-02-05 08:05:43 +00001355 /* Block netpoll rx while the interface is going down */
dingtianhongda6e3782013-05-27 19:53:31 +00001356 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001357
Eric Dumazete14a5992011-05-10 12:26:06 -07001358 list_add(&dev->unreg_list, &single);
1359 dev_close_many(&single);
1360 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001361
1362 netpoll_rx_enable(dev);
Eric Dumazete14a5992011-05-10 12:26:06 -07001363 }
dingtianhongda6e3782013-05-27 19:53:31 +00001364 return 0;
Patrick McHardybd380812010-02-26 06:34:53 +00001365}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001366EXPORT_SYMBOL(dev_close);
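/* Example (sketch): the mirror of the dev_open() sketch above; the
 * same RTNL rule applies when taking an interface down, and the call
 * is a harmless nop if the device is already down.
 *
 *	rtnl_lock();
 *	dev_close(slave_dev);
 *	rtnl_unlock();
 */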
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367
1368
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001369/**
1370 * dev_disable_lro - disable Large Receive Offload on a device
1371 * @dev: device
1372 *
1373 * Disable Large Receive Offload (LRO) on a net device. Must be
1374 * called under RTNL. This is needed if received packets may be
1375 * forwarded to another interface.
1376 */
1377void dev_disable_lro(struct net_device *dev)
1378{
Neil Hormanf11970e2011-05-24 08:31:09 +00001379 /*
 1380 * If we're trying to disable lro on a vlan device,
 1381 * use the underlying physical device instead.
1382 */
1383 if (is_vlan_dev(dev))
1384 dev = vlan_dev_real_dev(dev);
1385
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001386 dev->wanted_features &= ~NETIF_F_LRO;
1387 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001388
Michał Mirosław22d59692011-04-21 12:42:15 +00001389 if (unlikely(dev->features & NETIF_F_LRO))
1390 netdev_WARN(dev, "failed to disable LRO!\n");
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001391}
1392EXPORT_SYMBOL(dev_disable_lro);
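/* Example (sketch): a forwarding setup, such as enslaving a port to a
 * bridge, would turn LRO off before frames can be forwarded. "port_dev"
 * is a hypothetical device already protected by the caller's RTNL.
 *
 *	ASSERT_RTNL();
 *	dev_disable_lro(port_dev);
 */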
1393
Jiri Pirko351638e2013-05-28 01:30:21 +00001394static void netdev_notifier_info_init(struct netdev_notifier_info *info,
1395 struct net_device *dev)
1396{
1397 info->dev = dev;
1398}
1399
1400static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1401 struct net_device *dev)
1402{
1403 struct netdev_notifier_info info;
1404
1405 netdev_notifier_info_init(&info, dev);
1406 return nb->notifier_call(nb, val, &info);
1407}
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001408
Eric W. Biederman881d9662007-09-17 11:56:21 -07001409static int dev_boot_phase = 1;
1410
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411/**
1412 * register_netdevice_notifier - register a network notifier block
1413 * @nb: notifier
1414 *
1415 * Register a notifier to be called when network device events occur.
1416 * The notifier passed is linked into the kernel structures and must
1417 * not be reused until it has been unregistered. A negative errno code
1418 * is returned on a failure.
1419 *
 1420 * When registered, all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001421 * to the new notifier to allow the caller to have a race-free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 * view of the network device list.
1423 */
1424
1425int register_netdevice_notifier(struct notifier_block *nb)
1426{
1427 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001428 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001429 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 int err;
1431
1432 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001433 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001434 if (err)
1435 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001436 if (dev_boot_phase)
1437 goto unlock;
1438 for_each_net(net) {
1439 for_each_netdev(net, dev) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001440 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001441 err = notifier_to_errno(err);
1442 if (err)
1443 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444
Eric W. Biederman881d9662007-09-17 11:56:21 -07001445 if (!(dev->flags & IFF_UP))
1446 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001447
Jiri Pirko351638e2013-05-28 01:30:21 +00001448 call_netdevice_notifier(nb, NETDEV_UP, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001449 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001451
1452unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 rtnl_unlock();
1454 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001455
1456rollback:
1457 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001458 for_each_net(net) {
1459 for_each_netdev(net, dev) {
1460 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001461 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001462
Eric W. Biederman881d9662007-09-17 11:56:21 -07001463 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001464 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1465 dev);
1466 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001467 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001468 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001469 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001470 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001471
RongQing.Li8f891482011-11-30 23:43:07 -05001472outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001473 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001474 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001476EXPORT_SYMBOL(register_netdevice_notifier);
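/* Example (minimal sketch, all "my_" names hypothetical): a subsystem
 * watching for interfaces coming up. Because registration replays
 * NETDEV_REGISTER and NETDEV_UP, devices that already exist are seen
 * too. The info pointer is unpacked with netdev_notifier_info_to_dev().
 *
 *	static int my_event(struct notifier_block *nb, unsigned long event,
 *			    void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call	= my_event,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return register_netdevice_notifier(&my_nb);
 *	}
 */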
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477
1478/**
1479 * unregister_netdevice_notifier - unregister a network notifier block
1480 * @nb: notifier
1481 *
1482 * Unregister a notifier previously registered by
 1483 * register_netdevice_notifier(). The notifier is unlinked from the
1484 * kernel structures and may then be reused. A negative errno code
1485 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001486 *
 1487 * After unregistering, unregister and down device events are synthesized
1488 * for all devices on the device list to the removed notifier to remove
1489 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 */
1491
1492int unregister_netdevice_notifier(struct notifier_block *nb)
1493{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001494 struct net_device *dev;
1495 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001496 int err;
1497
1498 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001499 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001500 if (err)
1501 goto unlock;
1502
1503 for_each_net(net) {
1504 for_each_netdev(net, dev) {
1505 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001506 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1507 dev);
1508 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001509 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001510 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001511 }
1512 }
1513unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001514 rtnl_unlock();
1515 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001517EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518
1519/**
Jiri Pirko351638e2013-05-28 01:30:21 +00001520 * call_netdevice_notifiers_info - call all network notifier blocks
1521 * @val: value passed unmodified to notifier function
1522 * @dev: net_device pointer passed unmodified to notifier function
1523 * @info: notifier information data
1524 *
1525 * Call all network notifier blocks. Parameters and return value
1526 * are as for raw_notifier_call_chain().
1527 */
1528
1529int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
1530 struct netdev_notifier_info *info)
1531{
1532 ASSERT_RTNL();
1533 netdev_notifier_info_init(info, dev);
1534 return raw_notifier_call_chain(&netdev_chain, val, info);
1535}
1536EXPORT_SYMBOL(call_netdevice_notifiers_info);
1537
1538/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 * call_netdevice_notifiers - call all network notifier blocks
1540 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001541 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 *
1543 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001544 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 */
1546
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001547int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548{
Jiri Pirko351638e2013-05-28 01:30:21 +00001549 struct netdev_notifier_info info;
1550
1551 return call_netdevice_notifiers_info(val, dev, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001553EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554
Ingo Molnarc5905af2012-02-24 08:31:31 +01001555static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001556#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001557/* We are not allowed to call static_key_slow_dec() from irq context.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001558 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001559 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001560 */
1561static atomic_t netstamp_needed_deferred;
1562#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563
1564void net_enable_timestamp(void)
1565{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001566#ifdef HAVE_JUMP_LABEL
1567 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1568
1569 if (deferred) {
1570 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001571 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001572 return;
1573 }
1574#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001575 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001577EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578
1579void net_disable_timestamp(void)
1580{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001581#ifdef HAVE_JUMP_LABEL
1582 if (in_interrupt()) {
1583 atomic_inc(&netstamp_needed_deferred);
1584 return;
1585 }
1586#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001587 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001589EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590
Eric Dumazet3b098e22010-05-15 23:57:10 -07001591static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592{
Eric Dumazet588f0332011-11-15 04:12:55 +00001593 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001594 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001595 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596}
1597
Eric Dumazet588f0332011-11-15 04:12:55 +00001598#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001599 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001600 if ((COND) && !(SKB)->tstamp.tv64) \
1601 __net_timestamp(SKB); \
1602 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001603
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001604static inline bool is_skb_forwardable(struct net_device *dev,
1605 struct sk_buff *skb)
1606{
1607 unsigned int len;
1608
1609 if (!(dev->flags & IFF_UP))
1610 return false;
1611
1612 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1613 if (skb->len <= len)
1614 return true;
1615
 1616 /* if TSO is enabled, we don't care about the length as the packet
 1617 * could be forwarded without being segmented first.
1618 */
1619 if (skb_is_gso(skb))
1620 return true;
1621
1622 return false;
1623}
1624
Arnd Bergmann44540962009-11-26 06:07:08 +00001625/**
1626 * dev_forward_skb - loopback an skb to another netif
1627 *
1628 * @dev: destination network device
1629 * @skb: buffer to forward
1630 *
1631 * return values:
1632 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001633 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001634 *
1635 * dev_forward_skb can be used for injecting an skb from the
1636 * start_xmit function of one device into the receive queue
1637 * of another device.
1638 *
1639 * The receiving device may be in another namespace, so
1640 * we have to clear all information in the skb that could
1641 * impact namespace isolation.
1642 */
1643int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1644{
Michael S. Tsirkin48c83012011-08-31 08:03:29 +00001645 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1646 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1647 atomic_long_inc(&dev->rx_dropped);
1648 kfree_skb(skb);
1649 return NET_RX_DROP;
1650 }
1651 }
1652
Arnd Bergmann44540962009-11-26 06:07:08 +00001653 skb_orphan(skb);
1654
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001655 if (unlikely(!is_skb_forwardable(dev, skb))) {
Eric Dumazetcaf586e2010-09-30 21:06:55 +00001656 atomic_long_inc(&dev->rx_dropped);
Eric Dumazet6ec82562010-05-06 00:53:53 -07001657 kfree_skb(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001658 return NET_RX_DROP;
Eric Dumazet6ec82562010-05-06 00:53:53 -07001659 }
Benjamin LaHaise3b9785c2012-03-27 15:55:44 +00001660 skb->skb_iif = 0;
David S. Miller59b99972012-05-10 23:03:34 -04001661 skb_dst_drop(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001662 skb->tstamp.tv64 = 0;
1663 skb->pkt_type = PACKET_HOST;
1664 skb->protocol = eth_type_trans(skb, dev);
David S. Miller59b99972012-05-10 23:03:34 -04001665 skb->mark = 0;
1666 secpath_reset(skb);
1667 nf_reset(skb);
Patrick McHardy124dff02013-04-05 20:42:05 +02001668 nf_reset_trace(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001669 return netif_rx(skb);
1670}
1671EXPORT_SYMBOL_GPL(dev_forward_skb);
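/* Example (sketch): a veth-style pair device could hand frames from its
 * start_xmit straight to its peer's receive path. my_get_peer() stands
 * in for however a real driver resolves its peer; error accounting is
 * omitted (dev_forward_skb() already frees the skb on drop).
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		dev_forward_skb(peer, skb);
 *		return NETDEV_TX_OK;
 *	}
 */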
1672
Changli Gao71d9dec2010-12-15 19:57:25 +00001673static inline int deliver_skb(struct sk_buff *skb,
1674 struct packet_type *pt_prev,
1675 struct net_device *orig_dev)
1676{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001677 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1678 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001679 atomic_inc(&skb->users);
1680 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1681}
1682
Eric Leblondc0de08d2012-08-16 22:02:58 +00001683static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1684{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001685 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001686 return false;
1687
1688 if (ptype->id_match)
1689 return ptype->id_match(ptype, skb->sk);
1690 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1691 return true;
1692
1693 return false;
1694}
1695
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696/*
1697 * Support routine. Sends outgoing frames to any network
1698 * taps currently in use.
1699 */
1700
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001701static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702{
1703 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001704 struct sk_buff *skb2 = NULL;
1705 struct packet_type *pt_prev = NULL;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001706
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 rcu_read_lock();
1708 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1709 /* Never send packets back to the socket
1710 * they originated from - MvS (miquels@drinkel.ow.org)
1711 */
1712 if ((ptype->dev == dev || !ptype->dev) &&
Eric Leblondc0de08d2012-08-16 22:02:58 +00001713 (!skb_loop_sk(ptype, skb))) {
Changli Gao71d9dec2010-12-15 19:57:25 +00001714 if (pt_prev) {
1715 deliver_skb(skb2, pt_prev, skb->dev);
1716 pt_prev = ptype;
1717 continue;
1718 }
1719
1720 skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 if (!skb2)
1722 break;
1723
Eric Dumazet70978182010-12-20 21:22:51 +00001724 net_timestamp_set(skb2);
1725
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 /* skb->nh should be correctly
1727 set by sender, so that the second statement is
1728 just protection against buggy protocols.
1729 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001730 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001732 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001733 skb2->network_header > skb2->tail) {
Joe Perchese87cc472012-05-13 21:56:26 +00001734 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1735 ntohs(skb2->protocol),
1736 dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001737 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 }
1739
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001740 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 skb2->pkt_type = PACKET_OUTGOING;
Changli Gao71d9dec2010-12-15 19:57:25 +00001742 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 }
1744 }
Changli Gao71d9dec2010-12-15 19:57:25 +00001745 if (pt_prev)
1746 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 rcu_read_unlock();
1748}
1749
Ben Hutchings2c530402012-07-10 10:55:09 +00001750/**
1751 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001752 * @dev: Network device
1753 * @txq: number of queues available
1754 *
 1755 * If real_num_tx_queues is changed the tc mappings may no longer be
 1756 * valid. To resolve this, verify the tc mapping remains valid and, if
 1757 * not, NULL the mapping. With no priorities mapping to this
 1758 * offset/count pair it will no longer be used. In the worst case, if
 1759 * TC0 is invalid nothing can be done, so priority mappings are disabled.
 1760 * It is expected that drivers will fix this mapping if they can before
 1761 * calling netif_set_real_num_tx_queues.
1762 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001763static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001764{
1765 int i;
1766 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1767
1768 /* If TC0 is invalidated disable TC mapping */
1769 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001770 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001771 dev->num_tc = 0;
1772 return;
1773 }
1774
1775 /* Invalidated prio to tc mappings set to TC0 */
1776 for (i = 1; i < TC_BITMASK + 1; i++) {
1777 int q = netdev_get_prio_tc_map(dev, i);
1778
1779 tc = &dev->tc_to_txq[q];
1780 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001781 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1782 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001783 netdev_set_prio_tc_map(dev, i, 0);
1784 }
1785 }
1786}
1787
Alexander Duyck537c00d2013-01-10 08:57:02 +00001788#ifdef CONFIG_XPS
1789static DEFINE_MUTEX(xps_map_mutex);
1790#define xmap_dereference(P) \
1791 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1792
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001793static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1794 int cpu, u16 index)
1795{
1796 struct xps_map *map = NULL;
1797 int pos;
1798
1799 if (dev_maps)
1800 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1801
1802 for (pos = 0; map && pos < map->len; pos++) {
1803 if (map->queues[pos] == index) {
1804 if (map->len > 1) {
1805 map->queues[pos] = map->queues[--map->len];
1806 } else {
1807 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1808 kfree_rcu(map, rcu);
1809 map = NULL;
1810 }
1811 break;
1812 }
1813 }
1814
1815 return map;
1816}
1817
Alexander Duyck024e9672013-01-10 08:57:46 +00001818static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001819{
1820 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00001821 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001822 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001823
1824 mutex_lock(&xps_map_mutex);
1825 dev_maps = xmap_dereference(dev->xps_maps);
1826
1827 if (!dev_maps)
1828 goto out_no_maps;
1829
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001830 for_each_possible_cpu(cpu) {
Alexander Duyck024e9672013-01-10 08:57:46 +00001831 for (i = index; i < dev->num_tx_queues; i++) {
1832 if (!remove_xps_queue(dev_maps, cpu, i))
1833 break;
1834 }
1835 if (i == dev->num_tx_queues)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001836 active = true;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001837 }
1838
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001839 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00001840 RCU_INIT_POINTER(dev->xps_maps, NULL);
1841 kfree_rcu(dev_maps, rcu);
1842 }
1843
Alexander Duyck024e9672013-01-10 08:57:46 +00001844 for (i = index; i < dev->num_tx_queues; i++)
1845 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1846 NUMA_NO_NODE);
1847
Alexander Duyck537c00d2013-01-10 08:57:02 +00001848out_no_maps:
1849 mutex_unlock(&xps_map_mutex);
1850}
1851
Alexander Duyck01c5f862013-01-10 08:57:35 +00001852static struct xps_map *expand_xps_map(struct xps_map *map,
1853 int cpu, u16 index)
1854{
1855 struct xps_map *new_map;
1856 int alloc_len = XPS_MIN_MAP_ALLOC;
1857 int i, pos;
1858
1859 for (pos = 0; map && pos < map->len; pos++) {
1860 if (map->queues[pos] != index)
1861 continue;
1862 return map;
1863 }
1864
1865 /* Need to add queue to this CPU's existing map */
1866 if (map) {
1867 if (pos < map->alloc_len)
1868 return map;
1869
1870 alloc_len = map->alloc_len * 2;
1871 }
1872
1873 /* Need to allocate new map to store queue on this CPU's map */
1874 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1875 cpu_to_node(cpu));
1876 if (!new_map)
1877 return NULL;
1878
1879 for (i = 0; i < pos; i++)
1880 new_map->queues[i] = map->queues[i];
1881 new_map->alloc_len = alloc_len;
1882 new_map->len = pos;
1883
1884 return new_map;
1885}
1886
Alexander Duyck537c00d2013-01-10 08:57:02 +00001887int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
1888{
Alexander Duyck01c5f862013-01-10 08:57:35 +00001889 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001890 struct xps_map *map, *new_map;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001891 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001892 int cpu, numa_node_id = -2;
1893 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001894
1895 mutex_lock(&xps_map_mutex);
1896
1897 dev_maps = xmap_dereference(dev->xps_maps);
1898
Alexander Duyck01c5f862013-01-10 08:57:35 +00001899 /* allocate memory for queue storage */
1900 for_each_online_cpu(cpu) {
1901 if (!cpumask_test_cpu(cpu, mask))
1902 continue;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001903
Alexander Duyck01c5f862013-01-10 08:57:35 +00001904 if (!new_dev_maps)
1905 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001906 if (!new_dev_maps) {
1907 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001908 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001909 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001910
1911 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1912 NULL;
1913
1914 map = expand_xps_map(map, cpu, index);
1915 if (!map)
1916 goto error;
1917
1918 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1919 }
1920
1921 if (!new_dev_maps)
1922 goto out_no_new_maps;
1923
1924 for_each_possible_cpu(cpu) {
1925 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1926 /* add queue to CPU maps */
1927 int pos = 0;
1928
1929 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1930 while ((pos < map->len) && (map->queues[pos] != index))
1931 pos++;
1932
1933 if (pos == map->len)
1934 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001935#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00001936 if (numa_node_id == -2)
1937 numa_node_id = cpu_to_node(cpu);
1938 else if (numa_node_id != cpu_to_node(cpu))
1939 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001940#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00001941 } else if (dev_maps) {
1942 /* fill in the new device map from the old device map */
1943 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1944 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00001945 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001946
Alexander Duyck537c00d2013-01-10 08:57:02 +00001947 }
1948
Alexander Duyck01c5f862013-01-10 08:57:35 +00001949 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1950
Alexander Duyck537c00d2013-01-10 08:57:02 +00001951 /* Cleanup old maps */
Alexander Duyck01c5f862013-01-10 08:57:35 +00001952 if (dev_maps) {
1953 for_each_possible_cpu(cpu) {
1954 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1955 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1956 if (map && map != new_map)
1957 kfree_rcu(map, rcu);
1958 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00001959
Alexander Duyck537c00d2013-01-10 08:57:02 +00001960 kfree_rcu(dev_maps, rcu);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001961 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00001962
Alexander Duyck01c5f862013-01-10 08:57:35 +00001963 dev_maps = new_dev_maps;
1964 active = true;
1965
1966out_no_new_maps:
1967 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00001968 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
1969 (numa_node_id >= 0) ? numa_node_id :
1970 NUMA_NO_NODE);
1971
Alexander Duyck01c5f862013-01-10 08:57:35 +00001972 if (!dev_maps)
1973 goto out_no_maps;
1974
1975 /* removes queue from unused CPUs */
1976 for_each_possible_cpu(cpu) {
1977 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
1978 continue;
1979
1980 if (remove_xps_queue(dev_maps, cpu, index))
1981 active = true;
1982 }
1983
1984 /* free map if not active */
1985 if (!active) {
1986 RCU_INIT_POINTER(dev->xps_maps, NULL);
1987 kfree_rcu(dev_maps, rcu);
1988 }
1989
1990out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00001991 mutex_unlock(&xps_map_mutex);
1992
1993 return 0;
1994error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00001995 /* remove any maps that we added */
1996 for_each_possible_cpu(cpu) {
1997 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1998 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1999 NULL;
2000 if (new_map && new_map != map)
2001 kfree(new_map);
2002 }
2003
Alexander Duyck537c00d2013-01-10 08:57:02 +00002004 mutex_unlock(&xps_map_mutex);
2005
Alexander Duyck537c00d2013-01-10 08:57:02 +00002006 kfree(new_dev_maps);
2007 return -ENOMEM;
2008}
2009EXPORT_SYMBOL(netif_set_xps_queue);
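/* Example (sketch): a multiqueue driver could pin each tx queue to one
 * CPU at setup time. Return values are ignored here for brevity; a real
 * driver would check them.
 *
 *	cpumask_var_t mask;
 *	int i;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	for (i = 0; i < dev->real_num_tx_queues; i++) {
 *		cpumask_clear(mask);
 *		cpumask_set_cpu(i % num_online_cpus(), mask);
 *		netif_set_xps_queue(dev, mask, i);
 *	}
 *	free_cpumask_var(mask);
 */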
2010
2011#endif
John Fastabendf0796d52010-07-01 13:21:57 +00002012/*
2013 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 2014 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2015 */
Tom Herberte6484932010-10-18 18:04:39 +00002016int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002017{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002018 int rc;
2019
Tom Herberte6484932010-10-18 18:04:39 +00002020 if (txq < 1 || txq > dev->num_tx_queues)
2021 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002022
Ben Hutchings5c565802011-02-15 19:39:21 +00002023 if (dev->reg_state == NETREG_REGISTERED ||
2024 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002025 ASSERT_RTNL();
2026
Tom Herbert1d24eb42010-11-21 13:17:27 +00002027 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2028 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002029 if (rc)
2030 return rc;
2031
John Fastabend4f57c082011-01-17 08:06:04 +00002032 if (dev->num_tc)
2033 netif_setup_tc(dev, txq);
2034
Alexander Duyck024e9672013-01-10 08:57:46 +00002035 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002036 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002037#ifdef CONFIG_XPS
2038 netif_reset_xps_queues_gt(dev, txq);
2039#endif
2040 }
John Fastabendf0796d52010-07-01 13:21:57 +00002041 }
Tom Herberte6484932010-10-18 18:04:39 +00002042
2043 dev->real_num_tx_queues = txq;
2044 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002045}
2046EXPORT_SYMBOL(netif_set_real_num_tx_queues);
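/* Example (sketch): a driver registered with its maximum queue count
 * could shrink the active set after negotiating channels with firmware.
 * "hw_channels" is hypothetical; once the device is registered the call
 * must run under RTNL, as checked above.
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, hw_channels);
 *	rtnl_unlock();
 */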
Denis Vlasenko56079432006-03-29 15:57:29 -08002047
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002048#ifdef CONFIG_RPS
2049/**
2050 * netif_set_real_num_rx_queues - set actual number of RX queues used
2051 * @dev: Network device
2052 * @rxq: Actual number of RX queues
2053 *
2054 * This must be called either with the rtnl_lock held or before
2055 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002056 * negative error code. If called before registration, it always
2057 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002058 */
2059int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2060{
2061 int rc;
2062
Tom Herbertbd25fa72010-10-18 18:00:16 +00002063 if (rxq < 1 || rxq > dev->num_rx_queues)
2064 return -EINVAL;
2065
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002066 if (dev->reg_state == NETREG_REGISTERED) {
2067 ASSERT_RTNL();
2068
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002069 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2070 rxq);
2071 if (rc)
2072 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002073 }
2074
2075 dev->real_num_rx_queues = rxq;
2076 return 0;
2077}
2078EXPORT_SYMBOL(netif_set_real_num_rx_queues);
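/* Example (sketch): the rx-side companion to the tx sketch above, with
 * the same registration/RTNL rule and the same hypothetical count.
 *
 *	err = netif_set_real_num_rx_queues(dev, hw_channels);
 */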
2079#endif
2080
Ben Hutchings2c530402012-07-10 10:55:09 +00002081/**
2082 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002083 *
2084 * This routine should set an upper limit on the number of RSS queues
2085 * used by default by multiqueue devices.
2086 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002087int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002088{
2089 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2090}
2091EXPORT_SYMBOL(netif_get_num_default_rss_queues);
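/* Example (sketch): a driver sizing its RSS rings against both this
 * default and its own hardware limit ("hw_max_rings" is hypothetical).
 *
 *	rings = min_t(int, netif_get_num_default_rss_queues(), hw_max_rings);
 */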
2092
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002093static inline void __netif_reschedule(struct Qdisc *q)
2094{
2095 struct softnet_data *sd;
2096 unsigned long flags;
2097
2098 local_irq_save(flags);
2099 sd = &__get_cpu_var(softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002100 q->next_sched = NULL;
2101 *sd->output_queue_tailp = q;
2102 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002103 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2104 local_irq_restore(flags);
2105}
2106
David S. Miller37437bb2008-07-16 02:15:04 -07002107void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002108{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002109 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2110 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002111}
2112EXPORT_SYMBOL(__netif_schedule);
2113
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002114void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002115{
David S. Miller3578b0c2010-08-03 00:24:04 -07002116 if (atomic_dec_and_test(&skb->users)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002117 struct softnet_data *sd;
2118 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08002119
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002120 local_irq_save(flags);
2121 sd = &__get_cpu_var(softnet_data);
2122 skb->next = sd->completion_queue;
2123 sd->completion_queue = skb;
2124 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2125 local_irq_restore(flags);
2126 }
Denis Vlasenko56079432006-03-29 15:57:29 -08002127}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002128EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08002129
2130void dev_kfree_skb_any(struct sk_buff *skb)
2131{
2132 if (in_irq() || irqs_disabled())
2133 dev_kfree_skb_irq(skb);
2134 else
2135 dev_kfree_skb(skb);
2136}
2137EXPORT_SYMBOL(dev_kfree_skb_any);
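/* Example (sketch): a tx-completion handler that may run in hard-irq or
 * process context frees with the _any variant and lets the test above
 * choose the safe path. my_ring_next_completed() is hypothetical.
 *
 *	while ((skb = my_ring_next_completed(ring)) != NULL)
 *		dev_kfree_skb_any(skb);
 */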
2138
2139
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002140/**
2141 * netif_device_detach - mark device as removed
2142 * @dev: network device
2143 *
2144 * Mark device as removed from system and therefore no longer available.
2145 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002146void netif_device_detach(struct net_device *dev)
2147{
2148 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2149 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002150 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002151 }
2152}
2153EXPORT_SYMBOL(netif_device_detach);
2154
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002155/**
2156 * netif_device_attach - mark device as attached
2157 * @dev: network device
2158 *
2159 * Mark device as attached from system and restart if needed.
2160 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002161void netif_device_attach(struct net_device *dev)
2162{
2163 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2164 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002165 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002166 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002167 }
2168}
2169EXPORT_SYMBOL(netif_device_attach);
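/* Example (sketch): PCI suspend/resume hooks are the typical callers of
 * the detach/attach pair above; hypothetical legacy-style callbacks.
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */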
2170
Ben Hutchings36c92472012-01-17 07:57:56 +00002171static void skb_warn_bad_offload(const struct sk_buff *skb)
2172{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002173 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002174 struct net_device *dev = skb->dev;
2175 const char *driver = "";
2176
Ben Greearc846ad92013-04-19 10:45:52 +00002177 if (!net_ratelimit())
2178 return;
2179
Ben Hutchings36c92472012-01-17 07:57:56 +00002180 if (dev && dev->dev.parent)
2181 driver = dev_driver_string(dev->dev.parent);
2182
2183 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2184 "gso_type=%d ip_summed=%d\n",
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002185 driver, dev ? &dev->features : &null_features,
2186 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002187 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2188 skb_shinfo(skb)->gso_type, skb->ip_summed);
2189}
2190
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191/*
2192 * Invalidate hardware checksum when packet is to be mangled, and
2193 * complete checksum manually on outgoing path.
2194 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002195int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196{
Al Virod3bc23e2006-11-14 21:24:49 -08002197 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002198 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199
Patrick McHardy84fa7932006-08-29 16:44:56 -07002200 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002201 goto out_set_summed;
2202
2203 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002204 skb_warn_bad_offload(skb);
2205 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 }
2207
Eric Dumazetcef401d2013-01-25 20:34:37 +00002208 /* Before computing a checksum, we should make sure no frag could
2209 * be modified by an external entity : checksum could be wrong.
2210 */
2211 if (skb_has_shared_frag(skb)) {
2212 ret = __skb_linearize(skb);
2213 if (ret)
2214 goto out;
2215 }
2216
Michał Mirosław55508d62010-12-14 15:24:08 +00002217 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002218 BUG_ON(offset >= skb_headlen(skb));
2219 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2220
2221 offset += skb->csum_offset;
2222 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2223
2224 if (skb_cloned(skb) &&
2225 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2227 if (ret)
2228 goto out;
2229 }
2230
Herbert Xua0308472007-10-15 01:47:15 -07002231 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002232out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002234out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 return ret;
2236}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002237EXPORT_SYMBOL(skb_checksum_help);
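/* Example (sketch): code about to software-mangle a CHECKSUM_PARTIAL
 * skb resolves the deferred checksum first so the final bytes are
 * consistent; "drop" is a hypothetical error label.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
 *		goto drop;
 */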
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002239__be16 skb_network_protocol(struct sk_buff *skb)
2240{
2241 __be16 type = skb->protocol;
David S. Miller61816592013-03-20 12:46:26 -04002242 int vlan_depth = ETH_HLEN;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002243
Pravin B Shelar19acc322013-05-07 20:41:07 +00002244 /* Tunnel gso handlers can set protocol to ethernet. */
2245 if (type == htons(ETH_P_TEB)) {
2246 struct ethhdr *eth;
2247
2248 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2249 return 0;
2250
2251 eth = (struct ethhdr *)skb_mac_header(skb);
2252 type = eth->h_proto;
2253 }
2254
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002255 while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002256 struct vlan_hdr *vh;
2257
2258 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
2259 return 0;
2260
2261 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2262 type = vh->h_vlan_encapsulated_proto;
2263 vlan_depth += VLAN_HLEN;
2264 }
2265
2266 return type;
2267}
2268
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002269/**
2270 * skb_mac_gso_segment - mac layer segmentation handler.
2271 * @skb: buffer to segment
2272 * @features: features for the output path (see dev->features)
2273 */
2274struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2275 netdev_features_t features)
2276{
2277 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2278 struct packet_offload *ptype;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002279 __be16 type = skb_network_protocol(skb);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002280
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002281 if (unlikely(!type))
2282 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002283
2284 __skb_pull(skb, skb->mac_len);
2285
2286 rcu_read_lock();
2287 list_for_each_entry_rcu(ptype, &offload_base, list) {
2288 if (ptype->type == type && ptype->callbacks.gso_segment) {
2289 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2290 int err;
2291
2292 err = ptype->callbacks.gso_send_check(skb);
2293 segs = ERR_PTR(err);
2294 if (err || skb_gso_ok(skb, features))
2295 break;
2296 __skb_push(skb, (skb->data -
2297 skb_network_header(skb)));
2298 }
2299 segs = ptype->callbacks.gso_segment(skb, features);
2300 break;
2301 }
2302 }
2303 rcu_read_unlock();
2304
2305 __skb_push(skb, skb->data - skb_mac_header(skb));
2306
2307 return segs;
2308}
2309EXPORT_SYMBOL(skb_mac_gso_segment);
2310
2311
Cong Wang12b00042013-02-05 16:36:38 +00002312/* openvswitch calls this on the rx path, so we need a different check.
2313 */
2314static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2315{
2316 if (tx_path)
2317 return skb->ip_summed != CHECKSUM_PARTIAL;
2318 else
2319 return skb->ip_summed == CHECKSUM_NONE;
2320}
2321
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002322/**
Cong Wang12b00042013-02-05 16:36:38 +00002323 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002324 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002325 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002326 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002327 *
2328 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002329 *
2330 * It may return NULL if the skb requires no segmentation. This is
2331 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002332 */
Cong Wang12b00042013-02-05 16:36:38 +00002333struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2334 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002335{
Cong Wang12b00042013-02-05 16:36:38 +00002336 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002337 int err;
2338
Ben Hutchings36c92472012-01-17 07:57:56 +00002339 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002340
Herbert Xua430a432006-07-08 13:34:56 -07002341 if (skb_header_cloned(skb) &&
2342 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2343 return ERR_PTR(err);
2344 }
2345
Pravin B Shelar68c33162013-02-14 14:02:41 +00002346 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002347 skb_reset_mac_header(skb);
2348 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002349
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002350 return skb_mac_gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002351}
Cong Wang12b00042013-02-05 16:36:38 +00002352EXPORT_SYMBOL(__skb_gso_segment);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002353
Herbert Xufb286bb2005-11-10 13:01:24 -08002354/* Take action when hardware reception checksum errors are detected. */
2355#ifdef CONFIG_BUG
2356void netdev_rx_csum_fault(struct net_device *dev)
2357{
2358 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002359 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002360 dump_stack();
2361 }
2362}
2363EXPORT_SYMBOL(netdev_rx_csum_fault);
2364#endif
2365
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366/* Actually, we should eliminate this check as soon as we know that:
 2367 * 1. IOMMU is present and allows mapping all of memory.
2368 * 2. No high memory really exists on this machine.
2369 */
2370
Eric Dumazet9092c652010-04-02 13:34:49 -07002371static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002373#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002375 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002376 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2377 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2378 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002379 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002380 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002381 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002383 if (PCI_DMA_BUS_IS_PHYS) {
2384 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385
Eric Dumazet9092c652010-04-02 13:34:49 -07002386 if (!pdev)
2387 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002388 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002389 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2390 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002391 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2392 return 1;
2393 }
2394 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002395#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 return 0;
2397}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002399struct dev_gso_cb {
2400 void (*destructor)(struct sk_buff *skb);
2401};
2402
2403#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2404
2405static void dev_gso_skb_destructor(struct sk_buff *skb)
2406{
2407 struct dev_gso_cb *cb;
2408
2409 do {
2410 struct sk_buff *nskb = skb->next;
2411
2412 skb->next = nskb->next;
2413 nskb->next = NULL;
2414 kfree_skb(nskb);
2415 } while (skb->next);
2416
2417 cb = DEV_GSO_CB(skb);
2418 if (cb->destructor)
2419 cb->destructor(skb);
2420}
2421
2422/**
2423 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2424 * @skb: buffer to segment
Jesse Gross91ecb632011-01-09 06:23:33 +00002425 * @features: device features as applicable to this skb
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002426 *
2427 * This function segments the given skb and stores the list of segments
2428 * in skb->next.
2429 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002430static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002431{
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002432 struct sk_buff *segs;
2433
Herbert Xu576a30e2006-06-27 13:22:38 -07002434 segs = skb_gso_segment(skb, features);
2435
2436 /* Verifying header integrity only. */
2437 if (!segs)
2438 return 0;
2439
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07002440 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002441 return PTR_ERR(segs);
2442
2443 skb->next = segs;
2444 DEV_GSO_CB(skb)->destructor = skb->destructor;
2445 skb->destructor = dev_gso_skb_destructor;
2446
2447 return 0;
2448}
2449
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002450static netdev_features_t harmonize_features(struct sk_buff *skb,
2451 __be16 protocol, netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002452{
Ed Cashinc0d680e2012-09-19 15:49:00 +00002453 if (skb->ip_summed != CHECKSUM_NONE &&
2454 !can_checksum_protocol(features, protocol)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002455 features &= ~NETIF_F_ALL_CSUM;
Jesse Grossf01a5232011-01-09 06:23:31 +00002456 } else if (illegal_highdma(skb->dev, skb)) {
2457 features &= ~NETIF_F_SG;
2458 }
2459
2460 return features;
2461}
2462
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002463netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002464{
2465 __be16 protocol = skb->protocol;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002466 netdev_features_t features = skb->dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002467
Ben Hutchings30b678d2012-07-30 15:57:00 +00002468 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2469 features &= ~NETIF_F_GSO_MASK;
2470
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002471 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
Jesse Gross58e998c2010-10-29 12:14:55 +00002472 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2473 protocol = veh->h_vlan_encapsulated_proto;
Jesse Grossf01a5232011-01-09 06:23:31 +00002474 } else if (!vlan_tx_tag_present(skb)) {
2475 return harmonize_features(skb, protocol, features);
2476 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002477
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002478 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2479 NETIF_F_HW_VLAN_STAG_TX);
Jesse Grossf01a5232011-01-09 06:23:31 +00002480
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002481 if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002482 return harmonize_features(skb, protocol, features);
2483 } else {
2484 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002485 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2486 NETIF_F_HW_VLAN_STAG_TX;
Jesse Grossf01a5232011-01-09 06:23:31 +00002487 return harmonize_features(skb, protocol, features);
2488 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002489}
Jesse Grossf01a5232011-01-09 06:23:31 +00002490EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002491
John Fastabend6afff0c2010-06-16 14:18:12 +00002492/*
2493 * Returns true if either:
2494 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
Rami Rosend1a53df2012-08-27 23:39:24 +00002495 * 2. skb is fragmented and the device does not support SG.
John Fastabend6afff0c2010-06-16 14:18:12 +00002496 */
2497static inline int skb_needs_linearize(struct sk_buff *skb,
Patrick McHardy6708c9e2013-05-01 22:36:49 +00002498 netdev_features_t features)
John Fastabend6afff0c2010-06-16 14:18:12 +00002499{
Jesse Gross02932ce2011-01-09 06:23:34 +00002500 return skb_is_nonlinear(skb) &&
2501 ((skb_has_frag_list(skb) &&
2502 !(features & NETIF_F_FRAGLIST)) ||
Jesse Grosse1e78db2010-10-29 12:14:53 +00002503 (skb_shinfo(skb)->nr_frags &&
Jesse Gross02932ce2011-01-09 06:23:34 +00002504 !(features & NETIF_F_SG)));
John Fastabend6afff0c2010-06-16 14:18:12 +00002505}
2506
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002507int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2508 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002509{
Stephen Hemminger00829822008-11-20 20:14:53 -08002510 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00002511 int rc = NETDEV_TX_OK;
Koki Sanagiec764bf2011-05-30 21:48:34 +00002512 unsigned int skb_len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002513
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002514 if (likely(!skb->next)) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002515 netdev_features_t features;
Jesse Grossfc741212011-01-09 06:23:32 +00002516
Eric Dumazet93f154b2009-05-18 22:19:19 -07002517 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002518 * If device doesn't need skb->dst, release it right now while
Eric Dumazet93f154b2009-05-18 22:19:19 -07002519 * its hot in this cpu cache
2520 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002521 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2522 skb_dst_drop(skb);
2523
Jesse Grossfc741212011-01-09 06:23:32 +00002524 features = netif_skb_features(skb);
2525
Jesse Gross7b9c6092010-10-20 13:56:04 +00002526 if (vlan_tx_tag_present(skb) &&
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002527 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2528 skb = __vlan_put_tag(skb, skb->vlan_proto,
2529 vlan_tx_tag_get(skb));
Jesse Gross7b9c6092010-10-20 13:56:04 +00002530 if (unlikely(!skb))
2531 goto out;
2532
2533 skb->vlan_tci = 0;
2534 }
2535
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002536 /* If this is an encapsulation offload request, verify we are
 2537 * testing hardware encapsulation features instead of the
 2538 * standard features for the netdev.
2539 */
		if (skb->encapsulation)
			features &= dev->hw_enc_features;

		if (netif_needs_gso(skb, features)) {
			if (unlikely(dev_gso_segment(skb, features)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, features) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (skb->encapsulation)
					skb_set_inner_transport_header(skb,
						skb_checksum_start_offset(skb));
				else
					skb_set_transport_header(skb,
						skb_checksum_start_offset(skb));
				if (!(features & NETIF_F_ALL_CSUM) &&
				     skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		skb_len = skb->len;
		rc = ops->ndo_start_xmit(skb, dev);
		trace_net_dev_xmit(skb, rc, dev, skb_len);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(nskb, dev);

		skb_len = nskb->len;
		rc = ops->ndo_start_xmit(nskb, dev);
		trace_net_dev_xmit(nskb, rc, dev, skb_len);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_xmit_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL)) {
		skb->destructor = DEV_GSO_CB(skb)->destructor;
		consume_skb(skb);
		return rc;
	}
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}

static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get a more precise estimate of the bytes sent on the wire,
	 * we add the header size of every segment to pkt_len
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
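
/* Worked example (editorial sketch, not from the original source): for a
 * TSO skb carrying 4 * 1460 bytes of TCP payload behind 54 bytes of
 * Ethernet + IPv4 + TCP headers (no options), skb->len = 5894 and
 * gso_segs = 4, so pkt_len becomes 5894 + 3 * 54 = 6056, which is exactly
 * the four 1514-byte frames that will appear on the wire:
 *
 *	hdr_len = 14 + 20 + 20;			// 54 bytes per segment
 *	pkt_len = skb->len;			// 5894
 *	pkt_len += (4 - 1) * hdr_len;		// 6056 = 4 * 1514
 */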

static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_pkt_len_init(skb);
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
	 * and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

/**
 *	dev_loopback_xmit - loop back @skb
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
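
/* Editorial sketch (not part of the original file): a caller that wants a
 * local copy of an outgoing packet (in the spirit of the IPv4/IPv6
 * multicast loopback paths) could clone it and hand the clone to
 * dev_loopback_xmit().  The skb must already carry a dst entry, as the
 * WARN_ON() above demands:
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		dev_loopback_xmit(nskb);
 */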

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	txq = netdev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. This is the common case for software
	   devices: loopback, all sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (e.g. loopback and IP tunnels are clean, ignoring statistics
	   counters.)
	   However, it is possible that they rely on the protection
	   we provide here.

	   Check this and shoot the lock. It is not prone to deadlocks.
	   Or shoot the noqueue qdisc first; that is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				rc = dev_hard_start_xmit(skb, dev, txq);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
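
/* Editorial sketch (not from the original source): a minimal sender that
 * hands a prebuilt skb to dev_queue_xmit().  The helper name and the
 * chosen priority are assumptions for illustration; note that the skb is
 * consumed even on error, as the comment above explains:
 *
 *	static int example_xmit(struct net_device *dev, struct sk_buff *skb)
 *	{
 *		int rc;
 *
 *		skb->dev = dev;
 *		skb->priority = TC_PRIO_CONTROL;	// assumed priority
 *		rc = dev_queue_xmit(skb);		// consumes skb
 *		if (rc < 0)				// negative errno
 *			return rc;
 *		return net_xmit_eval(rc);		// fold NET_XMIT_* codes
 *	}
 */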


/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb->rxhash & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		if (map->len == 1 &&
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	if (!skb_get_rxhash(skb))
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in-order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);
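
/* Editorial sketch (not from the original source): the periodic scan a
 * driver with accelerated RFS might run over its installed filters.  The
 * mydrv_* names and the priv layout are hypothetical; only
 * rps_may_expire_flow() itself is real:
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct mydrv_filter *f = &priv->filters[i];
 *
 *		if (!f->installed)
 *			continue;
 *		if (rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, f->filter_id))
 *			mydrv_remove_hw_filter(priv, f);	// hypothetical
 *	}
 */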

#endif /* CONFIG_RFS_ACCEL */

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure belongs to another CPU.
 * If it does, queue it to our IPI list and return 1;
 * if not, return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = &__get_cpu_var(softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
 * queue (which may be a remote CPU's queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for the backlog device.
		 * We can use a non-atomic operation since we own the queue lock.
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

/**
 *	netif_rx - post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	int ret;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
EXPORT_SYMBOL(netif_rx);
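
/* Editorial sketch (not from the original source): the classic non-NAPI
 * receive path of an Ethernet driver feeding netif_rx() from its RX
 * interrupt.  The mydrv_* name is hypothetical; the core calls
 * (netdev_alloc_skb, skb_put, eth_type_trans, netif_rx) are real:
 *
 *	static void mydrv_rx_packet(struct net_device *dev, void *data, int len)
 *	{
 *		struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
 *
 *		if (!skb) {
 *			dev->stats.rx_dropped++;
 *			return;
 *		}
 *		skb_reserve(skb, NET_IP_ALIGN);
 *		memcpy(skb_put(skb, len), data, len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */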

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is?  Otherwise we currently pay for some
 * useless instructions (a compare and two extra stores) when it is
 * off but CONFIG_NET_CLS_ACT is on.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		net_warn_ratelimited("Redir loop detected, dropping packet (%d->%d)\n",
				     skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
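
/* Editorial sketch (not from the original source): registering a handler
 * the way bridge/bonding-like code does.  The my_* names are hypothetical;
 * the signature and return codes follow rx_handler_func_t and
 * enum rx_handler_result:
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct my_port *port =
 *			rcu_dereference(skb->dev->rx_handler_data);
 *
 *		if (my_port_steals(port, skb))		// hypothetical test
 *			return RX_HANDLER_CONSUMED;	// we queued/freed skb
 *		return RX_HANDLER_PASS;			// normal delivery
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(dev, my_handle_frame, port);
 *	rtnl_unlock();
 */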

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{

	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __constant_htons(ETH_P_ARP):
	case __constant_htons(ETH_P_IP):
	case __constant_htons(ETH_P_IPV6):
	case __constant_htons(ETH_P_8021Q):
	case __constant_htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *null_or_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		goto out;

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto unlock;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

skip_taps:
#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto unlock;
ncls:
#endif

	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto unlock;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto unlock;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (vlan_tx_nonzero_tag_present(skb))
		skb->pkt_type = PACKET_OTHERHOST;

	/* deliver only exact match when indicated */
	null_or_dev = deliver_exact ? skb->dev : NULL;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

unlock:
	rcu_read_unlock();
out:
	return ret;
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned long pflags = current->flags;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		current->flags |= PF_MEMALLOC;
		ret = __netif_receive_skb_core(skb, true);
		tsk_restore_flags(current, pflags, PF_MEMALLOC);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
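
/* Editorial sketch (not from the original source): netif_receive_skb() as
 * called from a hypothetical NAPI poll routine, the softirq context the
 * kernel-doc above requires.  All mydrv_* helpers are assumptions:
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_ring *ring =
 *			container_of(napi, struct mydrv_ring, napi);
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = mydrv_next_rx_skb(ring);
 *
 *			if (!skb)
 *				break;
 *			skb->protocol = eth_type_trans(skb, ring->netdev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */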

/* Network device is going away, flush any packets still pending
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}

/* napi->gro_list contains packets ordered by age, with the
 * youngest packets at its head.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_gro_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_gro_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
		NAPI_GRO_CB(p)->flush = 0;
	}
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;

	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb))
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);

		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
			skb_frag_unref(skb, 0);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
3861 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08003862}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003863
Herbert Xu96e93ea2009-01-06 10:49:34 -08003864
Rami Rosenbb728822012-11-28 21:55:25 +00003865static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003866{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003867 switch (ret) {
3868 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003869 if (netif_receive_skb(skb))
3870 ret = GRO_DROP;
3871 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003872
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003873 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08003874 kfree_skb(skb);
3875 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003876
Eric Dumazetdaa86542012-04-19 07:07:40 +00003877 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00003878 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3879 kmem_cache_free(skbuff_head_cache, skb);
3880 else
3881 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00003882 break;
3883
Ben Hutchings5b252f02009-10-29 07:17:09 +00003884 case GRO_HELD:
3885 case GRO_MERGED:
3886 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003887 }
3888
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003889 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003890}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003891
Eric Dumazetca07e432012-10-06 22:28:06 +00003892static void skb_gro_reset_offset(struct sk_buff *skb)
Herbert Xu78a478d2009-05-26 18:50:21 +00003893{
Eric Dumazetca07e432012-10-06 22:28:06 +00003894 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3895 const skb_frag_t *frag0 = &pinfo->frags[0];
3896
Herbert Xu78a478d2009-05-26 18:50:21 +00003897 NAPI_GRO_CB(skb)->data_offset = 0;
3898 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00003899 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00003900
Herbert Xu78d3fd02009-05-26 18:50:23 +00003901 if (skb->mac_header == skb->tail &&
Eric Dumazetca07e432012-10-06 22:28:06 +00003902 pinfo->nr_frags &&
3903 !PageHighMem(skb_frag_page(frag0))) {
3904 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3905 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xu74895942009-05-26 18:50:27 +00003906 }
Herbert Xu78a478d2009-05-26 18:50:21 +00003907}
Herbert Xu78a478d2009-05-26 18:50:21 +00003908
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003909gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003910{
Herbert Xu86911732009-01-29 14:19:50 +00003911 skb_gro_reset_offset(skb);
3912
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003913 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003914}
3915EXPORT_SYMBOL(napi_gro_receive);
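/*
 * Illustrative sketch, not part of this file: a typical NAPI driver feeds
 * received frames into GRO from its poll routine.  All "my_" names are
 * hypothetical and error handling is elided.
 */
#if 0
static int my_drv_poll(struct napi_struct *napi, int budget)
{
	struct my_drv_priv *priv = container_of(napi, struct my_drv_priv, napi);
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = my_drv_next_rx_skb(priv); /* hypothetical */

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, priv->netdev);
		napi_gro_receive(napi, skb); /* may merge, hold or deliver */
		done++;
	}
	if (done < budget)
		napi_complete(napi); /* flushes GRO, clears NAPI_STATE_SCHED */
	return done;
}
#endif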
3916
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00003917static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003918{
Herbert Xu96e93ea2009-01-06 10:49:34 -08003919 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00003920 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3921 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00003922 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08003923 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08003924 skb->skb_iif = 0;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003925
3926 napi->skb = skb;
3927}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003928
Herbert Xu76620aa2009-04-16 02:02:07 -07003929struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08003930{
Herbert Xu5d38a072009-01-04 16:13:40 -08003931 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003932
3933 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00003934 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3935 if (skb)
3936 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003937 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08003938 return skb;
3939}
Herbert Xu76620aa2009-04-16 02:02:07 -07003940EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003941
Rami Rosenbb728822012-11-28 21:55:25 +00003942static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003943 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003944{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003945 switch (ret) {
3946 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00003947 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00003948 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00003949
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003950 if (ret == GRO_HELD)
3951 skb_gro_pull(skb, -ETH_HLEN);
3952 else if (netif_receive_skb(skb))
3953 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00003954 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003955
3956 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003957 case GRO_MERGED_FREE:
3958 napi_reuse_skb(napi, skb);
3959 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003960
3961 case GRO_MERGED:
3962 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003963 }
3964
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003965 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003966}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003967
Eric Dumazet4adb9c42012-05-18 20:49:06 +00003968static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003969{
Herbert Xu76620aa2009-04-16 02:02:07 -07003970 struct sk_buff *skb = napi->skb;
3971 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00003972 unsigned int hlen;
3973 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07003974
3975 napi->skb = NULL;
3976
3977 skb_reset_mac_header(skb);
3978 skb_gro_reset_offset(skb);
3979
Herbert Xua5b1cf22009-05-26 18:50:28 +00003980 off = skb_gro_offset(skb);
3981 hlen = off + sizeof(*eth);
3982 eth = skb_gro_header_fast(skb, off);
3983 if (skb_gro_header_hard(skb, hlen)) {
3984 eth = skb_gro_header_slow(skb, hlen, off);
3985 if (unlikely(!eth)) {
3986 napi_reuse_skb(napi, skb);
3987 skb = NULL;
3988 goto out;
3989 }
Herbert Xu76620aa2009-04-16 02:02:07 -07003990 }
3991
3992 skb_gro_pull(skb, sizeof(*eth));
3993
3994 /*
3995 * This works because the only protocols we care about don't require
3996 * special handling. We'll fix it up properly at the end.
3997 */
3998 skb->protocol = eth->h_proto;
3999
4000out:
4001 return skb;
4002}
Herbert Xu76620aa2009-04-16 02:02:07 -07004003
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004004gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004005{
4006 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004007
4008 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004009 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004010
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004011 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004012}
4013EXPORT_SYMBOL(napi_gro_frags);
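/*
 * Illustrative sketch, not part of this file: page-based RX with the
 * napi_get_frags()/napi_gro_frags() pair.  The driver attaches its RX page
 * to the skb cached in the napi struct; the core then parses the Ethernet
 * header itself (see napi_frags_skb() above).  Names are hypothetical.
 */
#if 0
static void my_drv_rx_page(struct napi_struct *napi, struct page *page,
			   unsigned int off, unsigned int len,
			   unsigned int truesize)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;	/* allocation failed, frame dropped */

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, len,
			truesize);
	napi_gro_frags(napi); /* consumes or recycles napi->skb */
}
#endif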
4014
Eric Dumazete326bed2010-04-22 00:22:45 -07004015/*
4016 * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
4017 * Note: called with local irq disabled, but exits with local irq enabled.
4018 */
4019static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4020{
4021#ifdef CONFIG_RPS
4022 struct softnet_data *remsd = sd->rps_ipi_list;
4023
4024 if (remsd) {
4025 sd->rps_ipi_list = NULL;
4026
4027 local_irq_enable();
4028
4029		/* Send pending IPIs to kick RPS processing on remote CPUs. */
4030 while (remsd) {
4031 struct softnet_data *next = remsd->rps_ipi_next;
4032
4033 if (cpu_online(remsd->cpu))
4034 __smp_call_function_single(remsd->cpu,
4035 &remsd->csd, 0);
4036 remsd = next;
4037 }
4038 } else
4039#endif
4040 local_irq_enable();
4041}
4042
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004043static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044{
4045 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004046 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047
Eric Dumazete326bed2010-04-22 00:22:45 -07004048#ifdef CONFIG_RPS
4049	/* Check if we have pending IPIs; it's better to send them now
4050	 * than to wait until net_rx_action() ends.
4051 */
4052 if (sd->rps_ipi_list) {
4053 local_irq_disable();
4054 net_rps_action_and_irq_enable(sd);
4055 }
4056#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004057 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004058 local_irq_disable();
4059 while (work < quota) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060 struct sk_buff *skb;
Changli Gao6e7676c2010-04-27 15:07:33 -07004061 unsigned int qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004062
Changli Gao6e7676c2010-04-27 15:07:33 -07004063 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004064 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004065 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004066 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004067 input_queue_head_incr(sd);
4068 if (++work >= quota) {
4069 local_irq_enable();
4070 return work;
4071 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004072 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073
Changli Gao6e7676c2010-04-27 15:07:33 -07004074 rps_lock(sd);
4075 qlen = skb_queue_len(&sd->input_pkt_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004076 if (qlen)
Changli Gao6e7676c2010-04-27 15:07:33 -07004077 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4078 &sd->process_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004079
Changli Gao6e7676c2010-04-27 15:07:33 -07004080 if (qlen < quota - work) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004081 /*
4082 * Inline a custom version of __napi_complete().
4083			 * Only the current CPU owns and manipulates this NAPI,
4084			 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
4085			 * so we can use a plain write instead of clear_bit()
4086			 * and we don't need an smp_mb() memory barrier.
4087 */
4088 list_del(&napi->poll_list);
4089 napi->state = 0;
4090
Changli Gao6e7676c2010-04-27 15:07:33 -07004091 quota = work + qlen;
4092 }
4093 rps_unlock(sd);
4094 }
4095 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004097 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098}
4099
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004100/**
4101 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004102 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004103 *
4104 * The entry's receive function will be scheduled to run
4105 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004106void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004107{
4108 unsigned long flags;
4109
4110 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004111 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004112 local_irq_restore(flags);
4113}
4114EXPORT_SYMBOL(__napi_schedule);
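/*
 * Illustrative sketch, not part of this file: an interrupt handler usually
 * masks its RX interrupt and defers the rest of the work to NAPI.  The
 * napi_schedule_prep()/__napi_schedule() split lets the driver touch the
 * hardware only when it actually won the right to schedule.  Names are
 * hypothetical.
 */
#if 0
static irqreturn_t my_drv_interrupt(int irq, void *dev_id)
{
	struct my_drv_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		my_drv_mask_rx_irq(priv);	/* hypothetical helper */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
#endif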
4115
Herbert Xud565b0a2008-12-15 23:38:52 -08004116void __napi_complete(struct napi_struct *n)
4117{
4118 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4119 BUG_ON(n->gro_list);
4120
4121 list_del(&n->poll_list);
4122 smp_mb__before_clear_bit();
4123 clear_bit(NAPI_STATE_SCHED, &n->state);
4124}
4125EXPORT_SYMBOL(__napi_complete);
4126
4127void napi_complete(struct napi_struct *n)
4128{
4129 unsigned long flags;
4130
4131 /*
4132 * don't let napi dequeue from the cpu poll list
4133	 * just in case it's running on a different cpu
4134 */
4135 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4136 return;
4137
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004138 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004139 local_irq_save(flags);
4140 __napi_complete(n);
4141 local_irq_restore(flags);
4142}
4143EXPORT_SYMBOL(napi_complete);
4144
4145void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4146 int (*poll)(struct napi_struct *, int), int weight)
4147{
4148 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004149 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004150 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004151 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004152 napi->poll = poll;
Eric Dumazet82dc3c62013-03-05 15:57:22 +00004153 if (weight > NAPI_POLL_WEIGHT)
4154 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4155 weight, dev->name);
Herbert Xud565b0a2008-12-15 23:38:52 -08004156 napi->weight = weight;
4157 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004158 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004159#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004160 spin_lock_init(&napi->poll_lock);
4161 napi->poll_owner = -1;
4162#endif
4163 set_bit(NAPI_STATE_SCHED, &napi->state);
4164}
4165EXPORT_SYMBOL(netif_napi_add);
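/*
 * Illustrative sketch, not part of this file: NAPI contexts are registered
 * once at probe time, conventionally with a weight of 64, and must be
 * enabled before the device starts receiving.  Teardown mirrors this with
 * napi_disable()/netif_napi_del().  Names are hypothetical.
 */
#if 0
static void my_drv_setup_napi(struct my_drv_priv *priv)
{
	netif_napi_add(priv->netdev, &priv->napi, my_drv_poll, 64);
	napi_enable(&priv->napi);
}

static void my_drv_teardown_napi(struct my_drv_priv *priv)
{
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
}
#endif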
4166
4167void netif_napi_del(struct napi_struct *napi)
4168{
4169 struct sk_buff *skb, *next;
4170
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004171 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004172 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004173
4174 for (skb = napi->gro_list; skb; skb = next) {
4175 next = skb->next;
4176 skb->next = NULL;
4177 kfree_skb(skb);
4178 }
4179
4180 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004181 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004182}
4183EXPORT_SYMBOL(netif_napi_del);
4184
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185static void net_rx_action(struct softirq_action *h)
4186{
Eric Dumazete326bed2010-04-22 00:22:45 -07004187 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004188 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004189 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004190 void *have;
4191
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192 local_irq_disable();
4193
Eric Dumazete326bed2010-04-22 00:22:45 -07004194 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004195 struct napi_struct *n;
4196 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004198		/* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004199		 * Allow this to run for 2 jiffies, which allows
4200		 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004201 */
Eric Dumazetd1f41b62013-03-05 07:15:13 +00004202 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004203 goto softnet_break;
4204
4205 local_irq_enable();
4206
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004207 /* Even though interrupts have been re-enabled, this
4208 * access is safe because interrupts can only add new
4209 * entries to the tail of this list, and only ->poll()
4210 * calls can remove this head entry from the list.
4211 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004212 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004214 have = netpoll_poll_lock(n);
4215
4216 weight = n->weight;
4217
David S. Miller0a7606c2007-10-29 21:28:47 -07004218 /* This NAPI_STATE_SCHED test is for avoiding a race
4219 * with netpoll's poll_napi(). Only the entity which
4220 * obtains the lock and sees NAPI_STATE_SCHED set will
4221 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004222 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004223 */
4224 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004225 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004226 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004227 trace_napi_poll(n);
4228 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004229
4230 WARN_ON_ONCE(work > weight);
4231
4232 budget -= work;
4233
4234 local_irq_disable();
4235
4236 /* Drivers must not modify the NAPI state if they
4237 * consume the entire weight. In such cases this code
4238 * still "owns" the NAPI instance and therefore can
4239 * move the instance around on the list at-will.
4240 */
David S. Millerfed17f32008-01-07 21:00:40 -08004241 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004242 if (unlikely(napi_disable_pending(n))) {
4243 local_irq_enable();
4244 napi_complete(n);
4245 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004246 } else {
4247 if (n->gro_list) {
4248				/* flush too-old packets
4249 * If HZ < 1000, flush all packets.
4250 */
4251 local_irq_enable();
4252 napi_gro_flush(n, HZ >= 1000);
4253 local_irq_disable();
4254 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004255 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004256 }
David S. Millerfed17f32008-01-07 21:00:40 -08004257 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004258
4259 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004260 }
4261out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004262 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004263
Chris Leechdb217332006-06-17 21:24:58 -07004264#ifdef CONFIG_NET_DMA
4265 /*
4266 * There may not be any more sk_buffs coming right now, so push
4267 * any pending DMA copies to hardware
4268 */
Dan Williams2ba05622009-01-06 11:38:14 -07004269 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004270#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004271
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272 return;
4273
4274softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004275 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4277 goto out;
4278}
4279
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004280struct netdev_upper {
4281 struct net_device *dev;
4282 bool master;
4283 struct list_head list;
4284 struct rcu_head rcu;
4285 struct list_head search_list;
4286};
4287
4288static void __append_search_uppers(struct list_head *search_list,
4289 struct net_device *dev)
4290{
4291 struct netdev_upper *upper;
4292
4293 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4294		/* check that this upper is not already in the search list */
4295 if (list_empty(&upper->search_list))
4296 list_add_tail(&upper->search_list, search_list);
4297 }
4298}
4299
4300static bool __netdev_search_upper_dev(struct net_device *dev,
4301 struct net_device *upper_dev)
4302{
4303 LIST_HEAD(search_list);
4304 struct netdev_upper *upper;
4305 struct netdev_upper *tmp;
4306 bool ret = false;
4307
4308 __append_search_uppers(&search_list, dev);
4309 list_for_each_entry(upper, &search_list, search_list) {
4310 if (upper->dev == upper_dev) {
4311 ret = true;
4312 break;
4313 }
4314 __append_search_uppers(&search_list, upper->dev);
4315 }
4316 list_for_each_entry_safe(upper, tmp, &search_list, search_list)
4317 INIT_LIST_HEAD(&upper->search_list);
4318 return ret;
4319}
4320
4321static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
4322 struct net_device *upper_dev)
4323{
4324 struct netdev_upper *upper;
4325
4326 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4327 if (upper->dev == upper_dev)
4328 return upper;
4329 }
4330 return NULL;
4331}
4332
4333/**
4334 * netdev_has_upper_dev - Check if device is linked to an upper device
4335 * @dev: device
4336 * @upper_dev: upper device to check
4337 *
4338 * Find out if a device is linked to specified upper device and return true
4339 * in case it is. Note that this checks only immediate upper device,
4340 * not through a complete stack of devices. The caller must hold the RTNL lock.
4341 */
4342bool netdev_has_upper_dev(struct net_device *dev,
4343 struct net_device *upper_dev)
4344{
4345 ASSERT_RTNL();
4346
4347 return __netdev_find_upper(dev, upper_dev);
4348}
4349EXPORT_SYMBOL(netdev_has_upper_dev);
4350
4351/**
4352 * netdev_has_any_upper_dev - Check if device is linked to some device
4353 * @dev: device
4354 *
4355 * Find out if a device is linked to an upper device and return true in case
4356 * it is. The caller must hold the RTNL lock.
4357 */
4358bool netdev_has_any_upper_dev(struct net_device *dev)
4359{
4360 ASSERT_RTNL();
4361
4362 return !list_empty(&dev->upper_dev_list);
4363}
4364EXPORT_SYMBOL(netdev_has_any_upper_dev);
4365
4366/**
4367 * netdev_master_upper_dev_get - Get master upper device
4368 * @dev: device
4369 *
4370 * Find a master upper device and return a pointer to it, or NULL if
4371 * there is none. The caller must hold the RTNL lock.
4372 */
4373struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4374{
4375 struct netdev_upper *upper;
4376
4377 ASSERT_RTNL();
4378
4379 if (list_empty(&dev->upper_dev_list))
4380 return NULL;
4381
4382 upper = list_first_entry(&dev->upper_dev_list,
4383 struct netdev_upper, list);
4384 if (likely(upper->master))
4385 return upper->dev;
4386 return NULL;
4387}
4388EXPORT_SYMBOL(netdev_master_upper_dev_get);
4389
4390/**
4391 * netdev_master_upper_dev_get_rcu - Get master upper device
4392 * @dev: device
4393 *
4394 * Find a master upper device and return a pointer to it, or NULL if
4395 * there is none. The caller must hold the RCU read lock.
4396 */
4397struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4398{
4399 struct netdev_upper *upper;
4400
4401 upper = list_first_or_null_rcu(&dev->upper_dev_list,
4402 struct netdev_upper, list);
4403 if (upper && likely(upper->master))
4404 return upper->dev;
4405 return NULL;
4406}
4407EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4408
4409static int __netdev_upper_dev_link(struct net_device *dev,
4410 struct net_device *upper_dev, bool master)
4411{
4412 struct netdev_upper *upper;
4413
4414 ASSERT_RTNL();
4415
4416 if (dev == upper_dev)
4417 return -EBUSY;
4418
4419	/* To prevent loops, check that dev is not an upper device of upper_dev. */
4420 if (__netdev_search_upper_dev(upper_dev, dev))
4421 return -EBUSY;
4422
4423 if (__netdev_find_upper(dev, upper_dev))
4424 return -EEXIST;
4425
4426 if (master && netdev_master_upper_dev_get(dev))
4427 return -EBUSY;
4428
4429 upper = kmalloc(sizeof(*upper), GFP_KERNEL);
4430 if (!upper)
4431 return -ENOMEM;
4432
4433 upper->dev = upper_dev;
4434 upper->master = master;
4435 INIT_LIST_HEAD(&upper->search_list);
4436
4437	/* Ensure that the master upper link is always the first item in the list. */
4438 if (master)
4439 list_add_rcu(&upper->list, &dev->upper_dev_list);
4440 else
4441 list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
4442 dev_hold(upper_dev);
Jiri Pirko42e52bf2013-05-25 04:12:10 +00004443 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004444 return 0;
4445}
4446
4447/**
4448 * netdev_upper_dev_link - Add a link to the upper device
4449 * @dev: device
4450 * @upper_dev: new upper device
4451 *
4452 * Adds a link to a device which is upper to this one. The caller must hold
4453 * the RTNL lock. On a failure a negative errno code is returned.
4454 * On success the reference counts are adjusted and the function
4455 * returns zero.
4456 */
4457int netdev_upper_dev_link(struct net_device *dev,
4458 struct net_device *upper_dev)
4459{
4460 return __netdev_upper_dev_link(dev, upper_dev, false);
4461}
4462EXPORT_SYMBOL(netdev_upper_dev_link);
4463
4464/**
4465 * netdev_master_upper_dev_link - Add a master link to the upper device
4466 * @dev: device
4467 * @upper_dev: new upper device
4468 *
4469 * Adds a link to a device which is upper to this one. In this case, only
4470 * one master upper device can be linked, although other non-master devices
4471 * might be linked as well. The caller must hold the RTNL lock.
4472 * On a failure a negative errno code is returned. On success the reference
4473 * counts are adjusted and the function returns zero.
4474 */
4475int netdev_master_upper_dev_link(struct net_device *dev,
4476 struct net_device *upper_dev)
4477{
4478 return __netdev_upper_dev_link(dev, upper_dev, true);
4479}
4480EXPORT_SYMBOL(netdev_master_upper_dev_link);
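/*
 * Illustrative sketch, not part of this file: a bonding-style master links
 * itself as the upper device of each slave.  The RTNL lock must be held
 * around the topology change; loops and duplicate links are rejected with
 * -EBUSY/-EEXIST.  Names are hypothetical.
 */
#if 0
static int my_master_enslave(struct net_device *master,
			     struct net_device *slave)
{
	int err;

	ASSERT_RTNL();
	err = netdev_master_upper_dev_link(slave, master);
	if (err)
		return err;
	/* ... program the slave; on later release:
	 * netdev_upper_dev_unlink(slave, master);
	 */
	return 0;
}
#endif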
4481
4482/**
4483 * netdev_upper_dev_unlink - Removes a link to upper device
4484 * @dev: device
4485 * @upper_dev: upper device to unlink
4486 *
4487 * Removes a link to a device which is upper to this one. The caller must hold
4488 * the RTNL lock.
4489 */
4490void netdev_upper_dev_unlink(struct net_device *dev,
4491 struct net_device *upper_dev)
4492{
4493 struct netdev_upper *upper;
4494
4495 ASSERT_RTNL();
4496
4497 upper = __netdev_find_upper(dev, upper_dev);
4498 if (!upper)
4499 return;
4500 list_del_rcu(&upper->list);
4501 dev_put(upper_dev);
4502 kfree_rcu(upper, rcu);
Jiri Pirko42e52bf2013-05-25 04:12:10 +00004503 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004504}
4505EXPORT_SYMBOL(netdev_upper_dev_unlink);
4506
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004507static void dev_change_rx_flags(struct net_device *dev, int flags)
4508{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004509 const struct net_device_ops *ops = dev->netdev_ops;
4510
4511 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4512 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004513}
4514
Wang Chendad9b332008-06-18 01:48:28 -07004515static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07004516{
Eric Dumazetb536db92011-11-30 21:42:26 +00004517 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06004518 kuid_t uid;
4519 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07004520
Patrick McHardy24023452007-07-14 18:51:31 -07004521 ASSERT_RTNL();
4522
Wang Chendad9b332008-06-18 01:48:28 -07004523 dev->flags |= IFF_PROMISC;
4524 dev->promiscuity += inc;
4525 if (dev->promiscuity == 0) {
4526 /*
4527 * Avoid overflow.
4528 * If inc causes overflow, untouch promisc and return error.
4529 */
4530 if (inc < 0)
4531 dev->flags &= ~IFF_PROMISC;
4532 else {
4533 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004534 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4535 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07004536 return -EOVERFLOW;
4537 }
4538 }
Patrick McHardy4417da62007-06-27 01:28:10 -07004539 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004540 pr_info("device %s %s promiscuous mode\n",
4541 dev->name,
4542 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11004543 if (audit_enabled) {
4544 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05004545 audit_log(current->audit_context, GFP_ATOMIC,
4546 AUDIT_ANOM_PROMISCUOUS,
4547 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4548 dev->name, (dev->flags & IFF_PROMISC),
4549 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07004550 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06004551 from_kuid(&init_user_ns, uid),
4552 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05004553 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11004554 }
Patrick McHardy24023452007-07-14 18:51:31 -07004555
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004556 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07004557 }
Wang Chendad9b332008-06-18 01:48:28 -07004558 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07004559}
4560
Linus Torvalds1da177e2005-04-16 15:20:36 -07004561/**
4562 * dev_set_promiscuity - update promiscuity count on a device
4563 * @dev: device
4564 * @inc: modifier
4565 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07004566 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07004567 * remains above zero the interface remains promiscuous. Once it hits zero
4568 * the device reverts to normal filtering operation. A negative @inc
4569 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07004570 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004571 */
Wang Chendad9b332008-06-18 01:48:28 -07004572int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573{
Eric Dumazetb536db92011-11-30 21:42:26 +00004574 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07004575 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004576
Wang Chendad9b332008-06-18 01:48:28 -07004577 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07004578 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07004579 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07004580 if (dev->flags != old_flags)
4581 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07004582 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004584EXPORT_SYMBOL(dev_set_promiscuity);
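/*
 * Illustrative sketch, not part of this file: a capture-style user bumps
 * the promiscuity count for the duration of a capture and drops it again;
 * the counts nest, so several users can coexist (dev_set_allmulti() below
 * nests the same way).  RTNL must be held.  Names are hypothetical.
 */
#if 0
static int my_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);
	rtnl_unlock();
	return err;
}

static void my_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);
	rtnl_unlock();
}
#endif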
Linus Torvalds1da177e2005-04-16 15:20:36 -07004585
4586/**
4587 * dev_set_allmulti - update allmulti count on a device
4588 * @dev: device
4589 * @inc: modifier
4590 *
4591 * Add or remove reception of all multicast frames to a device. While the
4592 * count in the device remains above zero the interface remains listening
4593 * to all multicast frames. Once it hits zero the device reverts to normal
4594 * filtering operation. A negative @inc value is used to drop the counter
4595 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07004596 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597 */
4598
Wang Chendad9b332008-06-18 01:48:28 -07004599int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004600{
Eric Dumazetb536db92011-11-30 21:42:26 +00004601 unsigned int old_flags = dev->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004602
Patrick McHardy24023452007-07-14 18:51:31 -07004603 ASSERT_RTNL();
4604
Linus Torvalds1da177e2005-04-16 15:20:36 -07004605 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07004606 dev->allmulti += inc;
4607 if (dev->allmulti == 0) {
4608 /*
4609 * Avoid overflow.
4610 * If inc causes overflow, untouch allmulti and return error.
4611 */
4612 if (inc < 0)
4613 dev->flags &= ~IFF_ALLMULTI;
4614 else {
4615 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004616 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4617 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07004618 return -EOVERFLOW;
4619 }
4620 }
Patrick McHardy24023452007-07-14 18:51:31 -07004621 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004622 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07004623 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07004624 }
Wang Chendad9b332008-06-18 01:48:28 -07004625 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07004626}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004627EXPORT_SYMBOL(dev_set_allmulti);
Patrick McHardy4417da62007-06-27 01:28:10 -07004628
4629/*
4630 * Upload unicast and multicast address lists to device and
4631 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08004632 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07004633 * are present.
4634 */
4635void __dev_set_rx_mode(struct net_device *dev)
4636{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004637 const struct net_device_ops *ops = dev->netdev_ops;
4638
Patrick McHardy4417da62007-06-27 01:28:10 -07004639 /* dev_open will call this function so the list will stay sane. */
4640 if (!(dev->flags&IFF_UP))
4641 return;
4642
4643 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09004644 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07004645
Jiri Pirko01789342011-08-16 06:29:00 +00004646 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004647		/* Unicast address changes may only happen under the rtnl,
4648 * therefore calling __dev_set_promiscuity here is safe.
4649 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08004650 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004651 __dev_set_promiscuity(dev, 1);
Joe Perches2d348d12011-07-25 16:17:35 -07004652 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08004653 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004654 __dev_set_promiscuity(dev, -1);
Joe Perches2d348d12011-07-25 16:17:35 -07004655 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07004656 }
Patrick McHardy4417da62007-06-27 01:28:10 -07004657 }
Jiri Pirko01789342011-08-16 06:29:00 +00004658
4659 if (ops->ndo_set_rx_mode)
4660 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004661}
4662
4663void dev_set_rx_mode(struct net_device *dev)
4664{
David S. Millerb9e40852008-07-15 00:15:08 -07004665 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004666 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07004667 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004668}
4669
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004670/**
4671 * dev_get_flags - get flags reported to userspace
4672 * @dev: device
4673 *
4674 * Get the combination of flag bits exported through APIs to userspace.
4675 */
Eric Dumazet95c96172012-04-15 05:58:06 +00004676unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677{
Eric Dumazet95c96172012-04-15 05:58:06 +00004678 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679
4680 flags = (dev->flags & ~(IFF_PROMISC |
4681 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004682 IFF_RUNNING |
4683 IFF_LOWER_UP |
4684 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004685 (dev->gflags & (IFF_PROMISC |
4686 IFF_ALLMULTI));
4687
Stefan Rompfb00055a2006-03-20 17:09:11 -08004688 if (netif_running(dev)) {
4689 if (netif_oper_up(dev))
4690 flags |= IFF_RUNNING;
4691 if (netif_carrier_ok(dev))
4692 flags |= IFF_LOWER_UP;
4693 if (netif_dormant(dev))
4694 flags |= IFF_DORMANT;
4695 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696
4697 return flags;
4698}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004699EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004700
Patrick McHardybd380812010-02-26 06:34:53 +00004701int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004702{
Eric Dumazetb536db92011-11-30 21:42:26 +00004703 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00004704 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004705
Patrick McHardy24023452007-07-14 18:51:31 -07004706 ASSERT_RTNL();
4707
Linus Torvalds1da177e2005-04-16 15:20:36 -07004708 /*
4709 * Set the flags on our device.
4710 */
4711
4712 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4713 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4714 IFF_AUTOMEDIA)) |
4715 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4716 IFF_ALLMULTI));
4717
4718 /*
4719 * Load in the correct multicast list now the flags have changed.
4720 */
4721
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004722 if ((old_flags ^ flags) & IFF_MULTICAST)
4723 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004724
Patrick McHardy4417da62007-06-27 01:28:10 -07004725 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004726
4727 /*
4728 * Have we downed the interface. We handle IFF_UP ourselves
4729 * according to user attempts to set it, rather than blindly
4730 * setting it.
4731 */
4732
4733 ret = 0;
4734 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00004735 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004736
4737 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004738 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739 }
4740
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004742 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4743
Linus Torvalds1da177e2005-04-16 15:20:36 -07004744 dev->gflags ^= IFF_PROMISC;
4745 dev_set_promiscuity(dev, inc);
4746 }
4747
4748 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4749	   is important. Some (broken) drivers set IFF_PROMISC when
4750	   IFF_ALLMULTI is requested, without asking us and without reporting.
4751 */
4752 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004753 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4754
Linus Torvalds1da177e2005-04-16 15:20:36 -07004755 dev->gflags ^= IFF_ALLMULTI;
4756 dev_set_allmulti(dev, inc);
4757 }
4758
Patrick McHardybd380812010-02-26 06:34:53 +00004759 return ret;
4760}
4761
4762void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4763{
4764 unsigned int changes = dev->flags ^ old_flags;
4765
4766 if (changes & IFF_UP) {
4767 if (dev->flags & IFF_UP)
4768 call_netdevice_notifiers(NETDEV_UP, dev);
4769 else
4770 call_netdevice_notifiers(NETDEV_DOWN, dev);
4771 }
4772
4773 if (dev->flags & IFF_UP &&
4774 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4775 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4776}
4777
4778/**
4779 * dev_change_flags - change device settings
4780 * @dev: device
4781 * @flags: device state flags
4782 *
4783 * Change settings on a device based on the given state flags. The flags are
4784 * in the userspace exported format.
4785 */
Eric Dumazetb536db92011-11-30 21:42:26 +00004786int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00004787{
Eric Dumazetb536db92011-11-30 21:42:26 +00004788 int ret;
4789 unsigned int changes, old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00004790
4791 ret = __dev_change_flags(dev, flags);
4792 if (ret < 0)
4793 return ret;
4794
4795 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07004796 if (changes)
4797 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798
Patrick McHardybd380812010-02-26 06:34:53 +00004799 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004800 return ret;
4801}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004802EXPORT_SYMBOL(dev_change_flags);
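/*
 * Illustrative sketch, not part of this file: toggling IFF_UP through
 * dev_change_flags() under RTNL is the same path the SIOCSIFFLAGS ioctl
 * takes; the helper opens/closes the device and emits RTM_NEWLINK for any
 * changed bits.  The name is hypothetical.
 */
#if 0
static int my_set_if_up(struct net_device *dev, bool up)
{
	int err;

	rtnl_lock();
	if (up)
		err = dev_change_flags(dev, dev->flags | IFF_UP);
	else
		err = dev_change_flags(dev, dev->flags & ~IFF_UP);
	rtnl_unlock();
	return err;
}
#endif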
Linus Torvalds1da177e2005-04-16 15:20:36 -07004803
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004804/**
4805 * dev_set_mtu - Change maximum transfer unit
4806 * @dev: device
4807 * @new_mtu: new transfer unit
4808 *
4809 * Change the maximum transfer size of the network device.
4810 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004811int dev_set_mtu(struct net_device *dev, int new_mtu)
4812{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004813 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004814 int err;
4815
4816 if (new_mtu == dev->mtu)
4817 return 0;
4818
4819 /* MTU must be positive. */
4820 if (new_mtu < 0)
4821 return -EINVAL;
4822
4823 if (!netif_device_present(dev))
4824 return -ENODEV;
4825
4826 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004827 if (ops->ndo_change_mtu)
4828 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004829 else
4830 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004831
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00004832 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004833 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834 return err;
4835}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004836EXPORT_SYMBOL(dev_set_mtu);
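/*
 * Illustrative sketch, not part of this file: a stacked driver clamping the
 * MTU of its lower device.  dev_set_mtu() validates the value, calls the
 * driver's ndo_change_mtu() when one exists and fires NETDEV_CHANGEMTU on
 * success.  The name is hypothetical; callers are expected to hold RTNL.
 */
#if 0
static int my_clamp_lower_mtu(struct net_device *lower, int new_mtu)
{
	if (new_mtu >= lower->mtu)
		return 0;	/* already small enough */
	return dev_set_mtu(lower, new_mtu);
}
#endif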
Linus Torvalds1da177e2005-04-16 15:20:36 -07004837
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004838/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00004839 * dev_set_group - Change group this device belongs to
4840 * @dev: device
4841 * @new_group: group this device should belong to
4842 */
4843void dev_set_group(struct net_device *dev, int new_group)
4844{
4845 dev->group = new_group;
4846}
4847EXPORT_SYMBOL(dev_set_group);
4848
4849/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004850 * dev_set_mac_address - Change Media Access Control Address
4851 * @dev: device
4852 * @sa: new address
4853 *
4854 * Change the hardware (MAC) address of the device
4855 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4857{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004858 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004859 int err;
4860
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004861 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004862 return -EOPNOTSUPP;
4863 if (sa->sa_family != dev->type)
4864 return -EINVAL;
4865 if (!netif_device_present(dev))
4866 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004867 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00004868 if (err)
4869 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00004870 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00004871 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04004872 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00004873 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004874}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004875EXPORT_SYMBOL(dev_set_mac_address);
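/*
 * Illustrative sketch, not part of this file: setting a MAC address from
 * kernel code.  The address travels in a struct sockaddr whose sa_family
 * must match dev->type (ARPHRD_ETHER for Ethernet).  The name is
 * hypothetical; the caller is expected to hold RTNL.
 */
#if 0
static int my_set_mac(struct net_device *dev, const u8 *addr)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, addr, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}
#endif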
Linus Torvalds1da177e2005-04-16 15:20:36 -07004876
Jiri Pirko4bf84c32012-12-27 23:49:37 +00004877/**
4878 * dev_change_carrier - Change device carrier
4879 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00004880 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00004881 *
4882 * Change device carrier
4883 */
4884int dev_change_carrier(struct net_device *dev, bool new_carrier)
4885{
4886 const struct net_device_ops *ops = dev->netdev_ops;
4887
4888 if (!ops->ndo_change_carrier)
4889 return -EOPNOTSUPP;
4890 if (!netif_device_present(dev))
4891 return -ENODEV;
4892 return ops->ndo_change_carrier(dev, new_carrier);
4893}
4894EXPORT_SYMBOL(dev_change_carrier);
4895
Linus Torvalds1da177e2005-04-16 15:20:36 -07004896/**
4897 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004898 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004899 *
4900 * Returns a suitable unique value for a new device interface
4901 * number. The caller must hold the rtnl semaphore or the
4902 * dev_base_lock to be sure it remains unique.
4903 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004904static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004905{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00004906 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004907 for (;;) {
4908 if (++ifindex <= 0)
4909 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004910 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00004911 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004912 }
4913}
4914
Linus Torvalds1da177e2005-04-16 15:20:36 -07004915/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004916static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004917
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004918static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004919{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004920 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921}
4922
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004923static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004924{
Krishna Kumare93737b2009-12-08 22:26:02 +00004925 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004926
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004927 BUG_ON(dev_boot_phase);
4928 ASSERT_RTNL();
4929
Krishna Kumare93737b2009-12-08 22:26:02 +00004930 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004931		/* Some devices call this without ever having registered,
Krishna Kumare93737b2009-12-08 22:26:02 +00004932		 * as an unwind for failed initialization. Remove those
4933		 * devices and proceed with the remaining ones.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004934 */
4935 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004936 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
4937 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004938
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004939 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00004940 list_del(&dev->unreg_list);
4941 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004942 }
Eric Dumazet449f4542011-05-19 12:24:16 +00004943 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004944 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00004945 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004946
Octavian Purdila44345722010-12-13 12:44:07 +00004947 /* If device is running, close it first. */
4948 dev_close_many(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004949
Octavian Purdila44345722010-12-13 12:44:07 +00004950 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004951 /* And unlink it from device chain. */
4952 unlist_netdevice(dev);
4953
4954 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004955 }
4956
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004957 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004958
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004959 list_for_each_entry(dev, head, unreg_list) {
4960 /* Shutdown queueing discipline. */
4961 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004962
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004963
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004964		/* Notify protocols that we are about to destroy
4965		   this device. They should clean up all of their state.
4966 */
4967 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4968
Patrick McHardya2835762010-02-26 06:34:51 +00004969 if (!dev->rtnl_link_ops ||
4970 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4971 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4972
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004973 /*
4974 * Flush the unicast and multicast chains
4975 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00004976 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004977 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004978
4979 if (dev->netdev_ops->ndo_uninit)
4980 dev->netdev_ops->ndo_uninit(dev);
4981
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004982		/* Notifier chain MUST detach all our upper devices. */
4983 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004984
4985 /* Remove entries from kobject tree */
4986 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00004987#ifdef CONFIG_XPS
4988 /* Remove XPS queueing entries */
4989 netif_reset_xps_queues_gt(dev, 0);
4990#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004991 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004992
Eric W. Biederman850a5452011-10-13 22:25:23 +00004993 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004994
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004995 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004996 dev_put(dev);
4997}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004998
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004999static void rollback_registered(struct net_device *dev)
5000{
5001 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005002
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005003 list_add(&dev->unreg_list, &single);
5004 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00005005 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005006}
5007
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005008static netdev_features_t netdev_fix_features(struct net_device *dev,
5009 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07005010{
Michał Mirosław57422dc2011-01-22 12:14:12 +00005011 /* Fix illegal checksum combinations */
5012 if ((features & NETIF_F_HW_CSUM) &&
5013 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005014 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00005015 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5016 }
5017
Herbert Xub63365a2008-10-23 01:11:29 -07005018 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005019 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005020 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005021 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07005022 }
5023
Pravin B Shelarec5f0612013-03-07 09:28:01 +00005024 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5025 !(features & NETIF_F_IP_CSUM)) {
5026 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5027 features &= ~NETIF_F_TSO;
5028 features &= ~NETIF_F_TSO_ECN;
5029 }
5030
5031 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5032 !(features & NETIF_F_IPV6_CSUM)) {
5033 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5034 features &= ~NETIF_F_TSO6;
5035 }
5036
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005037 /* TSO ECN requires that TSO is present as well. */
5038 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5039 features &= ~NETIF_F_TSO_ECN;
5040
Michał Mirosław212b5732011-02-15 16:59:16 +00005041 /* Software GSO depends on SG. */
5042 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005043 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005044 features &= ~NETIF_F_GSO;
5045 }
5046
Michał Mirosławacd11302011-01-24 15:45:15 -08005047 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005048 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005049 /* maybe split UFO into V4 and V6? */
5050 if (!((features & NETIF_F_GEN_CSUM) ||
5051 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5052 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005053 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005054 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005055 features &= ~NETIF_F_UFO;
5056 }
5057
5058 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005059 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005060 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005061 features &= ~NETIF_F_UFO;
5062 }
5063 }
5064
5065 return features;
5066}
Herbert Xub63365a2008-10-23 01:11:29 -07005067
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005068int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00005069{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005070 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00005071 int err = 0;
5072
Michał Mirosław87267482011-04-12 09:56:38 +00005073 ASSERT_RTNL();
5074
Michał Mirosław5455c692011-02-15 16:59:17 +00005075 features = netdev_get_wanted_features(dev);
5076
5077 if (dev->netdev_ops->ndo_fix_features)
5078 features = dev->netdev_ops->ndo_fix_features(dev, features);
5079
5080 /* driver might be less strict about feature dependencies */
5081 features = netdev_fix_features(dev, features);
5082
5083 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005084 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00005085
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005086 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5087 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00005088
5089 if (dev->netdev_ops->ndo_set_features)
5090 err = dev->netdev_ops->ndo_set_features(dev, features);
5091
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005092 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00005093 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005094 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5095 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005096 return -1;
5097 }
5098
5099 if (!err)
5100 dev->features = features;
5101
5102 return 1;
5103}
5104
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005105/**
5106 * netdev_update_features - recalculate device features
5107 * @dev: the device to check
5108 *
5109 * Recalculate the dev->features set and send notifications if it
5110 * has changed. Should be called whenever driver- or hardware-dependent
5111 * conditions that influence the feature set may have changed.
5112 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005113void netdev_update_features(struct net_device *dev)
5114{
5115 if (__netdev_update_features(dev))
5116 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00005117}
5118EXPORT_SYMBOL(netdev_update_features);
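/* Usage sketch (illustrative only, not part of this file): a driver
 * that learns at runtime that an offload must be disabled would adjust
 * the wanted set under RTNL and let the core revalidate it; the
 * surrounding driver context is hypothetical.
 *
 *	rtnl_lock();
 *	dev->wanted_features &= ~NETIF_F_TSO;
 *	netdev_update_features(dev);
 *	rtnl_unlock();
 */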
5119
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005121 * netdev_change_features - recalculate device features
5122 * @dev: the device to check
5123 *
5124 * Recalculate dev->features set and send notifications even
5125 * if they have not changed. Should be called instead of
5126 * netdev_update_features() if dev->vlan_features might also
5127 * have changed, so that the changes can be propagated to stacked
5128 * VLAN devices.
5129 */
5130void netdev_change_features(struct net_device *dev)
5131{
5132 __netdev_update_features(dev);
5133 netdev_features_change(dev);
5134}
5135EXPORT_SYMBOL(netdev_change_features);
5136
5137/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005138 * netif_stacked_transfer_operstate - transfer operstate
5139 * @rootdev: the root or lower level device to transfer state from
5140 * @dev: the device to transfer operstate to
5141 *
5142 * Transfer operational state from root to device. This is normally
5143 * called when a stacking relationship exists between the root
5144 * device and the device (a leaf device).
5145 */
5146void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5147 struct net_device *dev)
5148{
5149 if (rootdev->operstate == IF_OPER_DORMANT)
5150 netif_dormant_on(dev);
5151 else
5152 netif_dormant_off(dev);
5153
5154 if (netif_carrier_ok(rootdev)) {
5155 if (!netif_carrier_ok(dev))
5156 netif_carrier_on(dev);
5157 } else {
5158 if (netif_carrier_ok(dev))
5159 netif_carrier_off(dev);
5160 }
5161}
5162EXPORT_SYMBOL(netif_stacked_transfer_operstate);
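/* Usage sketch (illustrative only): a stacking driver such as 802.1q
 * would typically call this from its NETDEV_CHANGE notifier so the
 * upper device mirrors the lower device's carrier and dormant state;
 * "realdev" and "vlandev" are hypothetical variables.
 *
 *	case NETDEV_CHANGE:
 *		netif_stacked_transfer_operstate(realdev, vlandev);
 *		break;
 */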
5163
Tom Herbertbf264142010-11-26 08:36:09 +00005164#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005165static int netif_alloc_rx_queues(struct net_device *dev)
5166{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005167 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00005168 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005169
Tom Herbertbd25fa72010-10-18 18:00:16 +00005170 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005171
Tom Herbertbd25fa72010-10-18 18:00:16 +00005172 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005173 if (!rx)
Tom Herbertbd25fa72010-10-18 18:00:16 +00005174 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005175
Tom Herbertbd25fa72010-10-18 18:00:16 +00005176 dev->_rx = rx;
5177
Tom Herbertbd25fa72010-10-18 18:00:16 +00005178 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00005179 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005180 return 0;
5181}
Tom Herbertbf264142010-11-26 08:36:09 +00005182#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005183
Changli Gaoaa942102010-12-04 02:31:41 +00005184static void netdev_init_one_queue(struct net_device *dev,
5185 struct netdev_queue *queue, void *_unused)
5186{
5187 /* Initialize queue lock */
5188 spin_lock_init(&queue->_xmit_lock);
5189 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5190 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00005191 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00005192 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00005193#ifdef CONFIG_BQL
5194 dql_init(&queue->dql, HZ);
5195#endif
Changli Gaoaa942102010-12-04 02:31:41 +00005196}
5197
Tom Herberte6484932010-10-18 18:04:39 +00005198static int netif_alloc_netdev_queues(struct net_device *dev)
5199{
5200 unsigned int count = dev->num_tx_queues;
5201 struct netdev_queue *tx;
5202
5203 BUG_ON(count < 1);
5204
5205 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005206 if (!tx)
Tom Herberte6484932010-10-18 18:04:39 +00005207 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005208
Tom Herberte6484932010-10-18 18:04:39 +00005209 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00005210
Tom Herberte6484932010-10-18 18:04:39 +00005211 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5212 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00005213
5214 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00005215}
5216
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005217/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005218 * register_netdevice - register a network device
5219 * @dev: device to register
5220 *
5221 * Take a completed network device structure and add it to the kernel
5222 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5223 * chain. 0 is returned on success. A negative errno code is returned
5224 * on a failure to set up the device, or if the name is a duplicate.
5225 *
5226 * Callers must hold the rtnl semaphore. You may want
5227 * register_netdev() instead of this.
5228 *
5229 * BUGS:
5230 * The locking appears insufficient to guarantee two parallel registers
5231 * will not get the same name.
5232 */
5233
5234int register_netdevice(struct net_device *dev)
5235{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005236 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005237 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005238
5239 BUG_ON(dev_boot_phase);
5240 ASSERT_RTNL();
5241
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005242 might_sleep();
5243
Linus Torvalds1da177e2005-04-16 15:20:36 -07005244 /* When net_device's are persistent, this will be fatal. */
5245 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005246 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005247
David S. Millerf1f28aa2008-07-15 00:08:33 -07005248 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07005249 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005250
Linus Torvalds1da177e2005-04-16 15:20:36 -07005251 dev->iflink = -1;
5252
Gao feng828de4f2012-09-13 20:58:27 +00005253 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00005254 if (ret < 0)
5255 goto out;
5256
Linus Torvalds1da177e2005-04-16 15:20:36 -07005257 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005258 if (dev->netdev_ops->ndo_init) {
5259 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005260 if (ret) {
5261 if (ret > 0)
5262 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08005263 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005264 }
5265 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005266
Patrick McHardyf6469682013-04-19 02:04:27 +00005267 if (((dev->hw_features | dev->features) &
5268 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00005269 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5270 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5271 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5272 ret = -EINVAL;
5273 goto err_uninit;
5274 }
5275
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00005276 ret = -EBUSY;
5277 if (!dev->ifindex)
5278 dev->ifindex = dev_new_index(net);
5279 else if (__dev_get_by_index(net, dev->ifindex))
5280 goto err_uninit;
5281
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282 if (dev->iflink == -1)
5283 dev->iflink = dev->ifindex;
5284
Michał Mirosław5455c692011-02-15 16:59:17 +00005285 /* Transfer changeable features to wanted_features and enable
5286 * software offloads (GSO and GRO).
5287 */
5288 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00005289 dev->features |= NETIF_F_SOFT_FEATURES;
5290 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005291
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005292 /* Turn on no cache copy if HW is doing checksum */
Michał Mirosław34324dc2011-11-15 15:29:55 +00005293 if (!(dev->flags & IFF_LOOPBACK)) {
5294 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5295 if (dev->features & NETIF_F_ALL_CSUM) {
5296 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5297 dev->features |= NETIF_F_NOCACHE_COPY;
5298 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005299 }
5300
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005301 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00005302 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005303 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00005304
Pravin B Shelaree579672013-03-07 09:28:08 +00005305 /* Make NETIF_F_SG inheritable to tunnel devices.
5306 */
5307 dev->hw_enc_features |= NETIF_F_SG;
5308
Simon Horman0d89d202013-05-23 21:02:52 +00005309 /* Make NETIF_F_SG inheritable to MPLS.
5310 */
5311 dev->mpls_features |= NETIF_F_SG;
5312
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005313 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5314 ret = notifier_to_errno(ret);
5315 if (ret)
5316 goto err_uninit;
5317
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005318 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005319 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005320 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005321 dev->reg_state = NETREG_REGISTERED;
5322
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005323 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00005324
Linus Torvalds1da177e2005-04-16 15:20:36 -07005325 /*
5326	 * Default initial state at registration is that the
5327 * device is present.
5328 */
5329
5330 set_bit(__LINK_STATE_PRESENT, &dev->state);
5331
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01005332 linkwatch_init_dev(dev);
5333
Linus Torvalds1da177e2005-04-16 15:20:36 -07005334 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005335 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005336 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005337 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005338
Jiri Pirko948b3372013-01-08 01:38:25 +00005339	/* If the device has a permanent device address, the driver should
5340 * set dev_addr and also addr_assign_type should be set to
5341 * NET_ADDR_PERM (default value).
5342 */
5343 if (dev->addr_assign_type == NET_ADDR_PERM)
5344 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5345
Linus Torvalds1da177e2005-04-16 15:20:36 -07005346 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005347 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005348 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005349 if (ret) {
5350 rollback_registered(dev);
5351 dev->reg_state = NETREG_UNREGISTERED;
5352 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005353 /*
5354 * Prevent userspace races by waiting until the network
5355	 * device is fully set up before sending notifications.
5356 */
Patrick McHardya2835762010-02-26 06:34:51 +00005357 if (!dev->rtnl_link_ops ||
5358 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5359 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005360
5361out:
5362 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005363
5364err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005365 if (dev->netdev_ops->ndo_uninit)
5366 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005367 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005368}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005369EXPORT_SYMBOL(register_netdevice);
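/* Usage sketch (illustrative only): callers that already hold RTNL,
 * e.g. rtnl_link ->newlink() implementations, register directly rather
 * than via register_netdev():
 *
 *	ASSERT_RTNL();
 *	err = register_netdevice(dev);
 *	if (err)
 *		return err;
 */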
Linus Torvalds1da177e2005-04-16 15:20:36 -07005370
5371/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005372 * init_dummy_netdev - init a dummy network device for NAPI
5373 * @dev: device to init
5374 *
5375 * This takes a network device structure and initializes the minimum
5376 * number of fields so it can be used to schedule NAPI polls without
5377 * registering a full-blown interface. This is to be used by drivers
5378 * that need to tie several hardware interfaces to a single NAPI
5379 * poll scheduler due to HW limitations.
5380 */
5381int init_dummy_netdev(struct net_device *dev)
5382{
5383 /* Clear everything. Note we don't initialize spinlocks
5384	 * as they aren't supposed to be taken by any of the
5385 * NAPI code and this dummy netdev is supposed to be
5386 * only ever used for NAPI polls
5387 */
5388 memset(dev, 0, sizeof(struct net_device));
5389
5390 /* make sure we BUG if trying to hit standard
5391 * register/unregister code path
5392 */
5393 dev->reg_state = NETREG_DUMMY;
5394
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005395 /* NAPI wants this */
5396 INIT_LIST_HEAD(&dev->napi_list);
5397
5398 /* a dummy interface is started by default */
5399 set_bit(__LINK_STATE_PRESENT, &dev->state);
5400 set_bit(__LINK_STATE_START, &dev->state);
5401
Eric Dumazet29b44332010-10-11 10:22:12 +00005402	/* Note : We don't allocate pcpu_refcnt for dummy devices,
5403	 * because users of this 'device' don't need to change
5404 * its refcount.
5405 */
5406
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005407 return 0;
5408}
5409EXPORT_SYMBOL_GPL(init_dummy_netdev);
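/* Usage sketch (illustrative only): a driver multiplexing several
 * hardware units onto one poll function might do this at probe time;
 * "priv", "dummy_dev" and "mydrv_poll" are hypothetical.
 *
 *	init_dummy_netdev(&priv->dummy_dev);
 *	netif_napi_add(&priv->dummy_dev, &priv->napi, mydrv_poll, 64);
 *	napi_enable(&priv->napi);
 */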
5410
5411
5412/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005413 * register_netdev - register a network device
5414 * @dev: device to register
5415 *
5416 * Take a completed network device structure and add it to the kernel
5417 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5418 * chain. 0 is returned on success. A negative errno code is returned
5419 * on a failure to set up the device, or if the name is a duplicate.
5420 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005421 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422 * and expands the device name if you passed a format string to
5423 * alloc_netdev.
5424 */
5425int register_netdev(struct net_device *dev)
5426{
5427 int err;
5428
5429 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005430 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005431 rtnl_unlock();
5432 return err;
5433}
5434EXPORT_SYMBOL(register_netdev);
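/* Usage sketch (illustrative only): the common probe-time pattern
 * around this helper; "struct mydrv_priv" and "mydrv_setup" are
 * hypothetical.
 *
 *	dev = alloc_netdev(sizeof(struct mydrv_priv), "myd%d", mydrv_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */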
5435
Eric Dumazet29b44332010-10-11 10:22:12 +00005436int netdev_refcnt_read(const struct net_device *dev)
5437{
5438 int i, refcnt = 0;
5439
5440 for_each_possible_cpu(i)
5441 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5442 return refcnt;
5443}
5444EXPORT_SYMBOL(netdev_refcnt_read);
5445
Ben Hutchings2c530402012-07-10 10:55:09 +00005446/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005447 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00005448 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005449 *
5450 * This is called when unregistering network devices.
5451 *
5452 * Any protocol or device that holds a reference should register
5453 * for netdevice notification, and clean up and put back the
5454 * reference if they receive an UNREGISTER event.
5455 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005456 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005457 */
5458static void netdev_wait_allrefs(struct net_device *dev)
5459{
5460 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00005461 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005462
Eric Dumazete014deb2009-11-17 05:59:21 +00005463 linkwatch_forget_dev(dev);
5464
Linus Torvalds1da177e2005-04-16 15:20:36 -07005465 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00005466 refcnt = netdev_refcnt_read(dev);
5467
5468 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005469 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005470 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005471
5472 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005473 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005474
Eric Dumazet748e2d92012-08-22 21:50:59 +00005475 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005476 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00005477 rtnl_lock();
5478
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005479 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005480 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5481 &dev->state)) {
5482 /* We must not have linkwatch events
5483 * pending on unregister. If this
5484 * happens, we simply run the queue
5485 * unscheduled, resulting in a noop
5486 * for this device.
5487 */
5488 linkwatch_run_queue();
5489 }
5490
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005491 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005492
5493 rebroadcast_time = jiffies;
5494 }
5495
5496 msleep(250);
5497
Eric Dumazet29b44332010-10-11 10:22:12 +00005498 refcnt = netdev_refcnt_read(dev);
5499
Linus Torvalds1da177e2005-04-16 15:20:36 -07005500 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005501 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5502 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005503 warning_time = jiffies;
5504 }
5505 }
5506}
5507
5508/* The sequence is:
5509 *
5510 * rtnl_lock();
5511 * ...
5512 * register_netdevice(x1);
5513 * register_netdevice(x2);
5514 * ...
5515 * unregister_netdevice(y1);
5516 * unregister_netdevice(y2);
5517 * ...
5518 * rtnl_unlock();
5519 * free_netdev(y1);
5520 * free_netdev(y2);
5521 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005522 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005523 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005524 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005525 * without deadlocking with linkwatch via keventd.
5526 * 2) Since we run with the RTNL semaphore not held, we can sleep
5527 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005528 *
5529 * We must not return until all unregister events added during
5530 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005531 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005532void netdev_run_todo(void)
5533{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005534 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005535
Linus Torvalds1da177e2005-04-16 15:20:36 -07005536 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005537 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005538
5539 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005540
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005541
5542 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00005543 if (!list_empty(&list))
5544 rcu_barrier();
5545
Linus Torvalds1da177e2005-04-16 15:20:36 -07005546 while (!list_empty(&list)) {
5547 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00005548 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005549 list_del(&dev->todo_list);
5550
Eric Dumazet748e2d92012-08-22 21:50:59 +00005551 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005552 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00005553 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005554
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005555 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005556 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07005557 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005558 dump_stack();
5559 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005560 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005561
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005562 dev->reg_state = NETREG_UNREGISTERED;
5563
Changli Gao152102c2010-03-30 20:16:22 +00005564 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005565
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005566 netdev_wait_allrefs(dev);
5567
5568 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00005569 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00005570 WARN_ON(rcu_access_pointer(dev->ip_ptr));
5571 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005572 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005573
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005574 if (dev->destructor)
5575 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005576
5577 /* Free network device */
5578 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005579 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005580}
5581
Ben Hutchings3cfde792010-07-09 09:11:52 +00005582/* Convert net_device_stats to rtnl_link_stats64. They have the same
5583 * fields in the same order, with only the type differing.
5584 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00005585void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5586 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00005587{
5588#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00005589 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5590 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00005591#else
5592 size_t i, n = sizeof(*stats64) / sizeof(u64);
5593 const unsigned long *src = (const unsigned long *)netdev_stats;
5594 u64 *dst = (u64 *)stats64;
5595
5596 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5597 sizeof(*stats64) / sizeof(u64));
5598 for (i = 0; i < n; i++)
5599 dst[i] = src[i];
5600#endif
5601}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00005602EXPORT_SYMBOL(netdev_stats_to_stats64);
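/* Usage sketch (illustrative only): a driver that only maintains the
 * legacy dev->stats counters can implement ->ndo_get_stats64() as a
 * thin conversion; the function name is hypothetical.
 *
 *	static struct rtnl_link_stats64 *
 *	mydrv_get_stats64(struct net_device *dev,
 *			  struct rtnl_link_stats64 *storage)
 *	{
 *		netdev_stats_to_stats64(storage, &dev->stats);
 *		return storage;
 *	}
 */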
Ben Hutchings3cfde792010-07-09 09:11:52 +00005603
Eric Dumazetd83345a2009-11-16 03:36:51 +00005604/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005605 * dev_get_stats - get network device statistics
5606 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07005607 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005608 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00005609 * Get network statistics from device. Return @storage.
5610 * The device driver may provide its own method by setting
5611 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5612 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005613 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00005614struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5615 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005616{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005617 const struct net_device_ops *ops = dev->netdev_ops;
5618
Eric Dumazet28172732010-07-07 14:58:56 -07005619 if (ops->ndo_get_stats64) {
5620 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00005621 ops->ndo_get_stats64(dev, storage);
5622 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00005623 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00005624 } else {
5625 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07005626 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00005627 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07005628 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07005629}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005630EXPORT_SYMBOL(dev_get_stats);
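/* Usage sketch (illustrative only): callers supply their own storage,
 * so a reader needs nothing more than a valid device reference:
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: %llu packets received\n", dev->name,
 *		(unsigned long long)stats.rx_packets);
 */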
Rusty Russellc45d2862007-03-28 14:29:08 -07005631
Eric Dumazet24824a02010-10-02 06:11:55 +00005632struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07005633{
Eric Dumazet24824a02010-10-02 06:11:55 +00005634 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07005635
Eric Dumazet24824a02010-10-02 06:11:55 +00005636#ifdef CONFIG_NET_CLS_ACT
5637 if (queue)
5638 return queue;
5639 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5640 if (!queue)
5641 return NULL;
5642 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00005643 queue->qdisc = &noop_qdisc;
5644 queue->qdisc_sleeping = &noop_qdisc;
5645 rcu_assign_pointer(dev->ingress_queue, queue);
5646#endif
5647 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07005648}
5649
Eric Dumazet2c60db02012-09-16 09:17:26 +00005650static const struct ethtool_ops default_ethtool_ops;
5651
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00005652void netdev_set_default_ethtool_ops(struct net_device *dev,
5653 const struct ethtool_ops *ops)
5654{
5655 if (dev->ethtool_ops == &default_ethtool_ops)
5656 dev->ethtool_ops = ops;
5657}
5658EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
5659
Linus Torvalds1da177e2005-04-16 15:20:36 -07005660/**
Tom Herbert36909ea2011-01-09 19:36:31 +00005661 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005662 * @sizeof_priv: size of private data to allocate space for
5663 * @name: device name format string
5664 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00005665 * @txqs: the number of TX subqueues to allocate
5666 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005667 *
5668 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005669 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00005670 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671 */
Tom Herbert36909ea2011-01-09 19:36:31 +00005672struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5673 void (*setup)(struct net_device *),
5674 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005675{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005676 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005677 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005678 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005679
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005680 BUG_ON(strlen(name) >= sizeof(dev->name));
5681
Tom Herbert36909ea2011-01-09 19:36:31 +00005682 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005683 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00005684 return NULL;
5685 }
5686
Tom Herbert36909ea2011-01-09 19:36:31 +00005687#ifdef CONFIG_RPS
5688 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005689 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00005690 return NULL;
5691 }
5692#endif
5693
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005694 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005695 if (sizeof_priv) {
5696 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005697 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005698 alloc_size += sizeof_priv;
5699 }
5700 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005701 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005702
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005703 p = kzalloc(alloc_size, GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005704 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005705 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005706
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005707 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005708 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005709
Eric Dumazet29b44332010-10-11 10:22:12 +00005710 dev->pcpu_refcnt = alloc_percpu(int);
5711 if (!dev->pcpu_refcnt)
Tom Herberte6484932010-10-18 18:04:39 +00005712 goto free_p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005713
Linus Torvalds1da177e2005-04-16 15:20:36 -07005714 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00005715 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005716
Jiri Pirko22bedad32010-04-01 21:22:57 +00005717 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005718 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00005719
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005720 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005721
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005722 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00005723 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005724
Herbert Xud565b0a2008-12-15 23:38:52 -08005725 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005726 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00005727 INIT_LIST_HEAD(&dev->link_watch_list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005728 INIT_LIST_HEAD(&dev->upper_dev_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005729 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005730 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08005731
5732 dev->num_tx_queues = txqs;
5733 dev->real_num_tx_queues = txqs;
5734 if (netif_alloc_netdev_queues(dev))
5735 goto free_all;
5736
5737#ifdef CONFIG_RPS
5738 dev->num_rx_queues = rxqs;
5739 dev->real_num_rx_queues = rxqs;
5740 if (netif_alloc_rx_queues(dev))
5741 goto free_all;
5742#endif
5743
Linus Torvalds1da177e2005-04-16 15:20:36 -07005744 strcpy(dev->name, name);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005745 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00005746 if (!dev->ethtool_ops)
5747 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005748 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005749
David S. Miller8d3bdbd2011-02-08 15:02:50 -08005750free_all:
5751 free_netdev(dev);
5752 return NULL;
5753
Eric Dumazet29b44332010-10-11 10:22:12 +00005754free_pcpu:
5755 free_percpu(dev->pcpu_refcnt);
Tom Herberted9af2e2010-11-09 10:47:30 +00005756 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00005757#ifdef CONFIG_RPS
5758 kfree(dev->_rx);
5759#endif
5760
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005761free_p:
5762 kfree(p);
5763 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005764}
Tom Herbert36909ea2011-01-09 19:36:31 +00005765EXPORT_SYMBOL(alloc_netdev_mqs);
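/* Usage sketch (illustrative only): an Ethernet driver asking for
 * eight TX and eight RX queues; ether_setup() is the stock setup
 * callback for Ethernet-style devices, the private struct is
 * hypothetical.
 *
 *	dev = alloc_netdev_mqs(sizeof(struct mydrv_priv), "eth%d",
 *			       ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */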
Linus Torvalds1da177e2005-04-16 15:20:36 -07005766
5767/**
5768 * free_netdev - free network device
5769 * @dev: device
5770 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005771 * This function does the last stage of destroying an allocated device
5772 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005773 * If this is the last reference then it will be freed.
5774 */
5775void free_netdev(struct net_device *dev)
5776{
Herbert Xud565b0a2008-12-15 23:38:52 -08005777 struct napi_struct *p, *n;
5778
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005779 release_net(dev_net(dev));
5780
David S. Millere8a04642008-07-17 00:34:19 -07005781 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00005782#ifdef CONFIG_RPS
5783 kfree(dev->_rx);
5784#endif
David S. Millere8a04642008-07-17 00:34:19 -07005785
Eric Dumazet33d480c2011-08-11 19:30:52 +00005786 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00005787
Jiri Pirkof001fde2009-05-05 02:48:28 +00005788 /* Flush device addresses */
5789 dev_addr_flush(dev);
5790
Herbert Xud565b0a2008-12-15 23:38:52 -08005791 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5792 netif_napi_del(p);
5793
Eric Dumazet29b44332010-10-11 10:22:12 +00005794 free_percpu(dev->pcpu_refcnt);
5795 dev->pcpu_refcnt = NULL;
5796
Stephen Hemminger3041a062006-05-26 13:25:24 -07005797 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005798 if (dev->reg_state == NETREG_UNINITIALIZED) {
5799 kfree((char *)dev - dev->padded);
5800 return;
5801 }
5802
5803 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5804 dev->reg_state = NETREG_RELEASED;
5805
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005806 /* will free via device release */
5807 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005808}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005809EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005810
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005811/**
5812 * synchronize_net - Synchronize with packet receive processing
5813 *
5814 * Wait for packets currently being received to be done.
5815 * Does not block later packets from starting.
5816 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005817void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005818{
5819 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00005820 if (rtnl_is_locked())
5821 synchronize_rcu_expedited();
5822 else
5823 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005824}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005825EXPORT_SYMBOL(synchronize_net);
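/* Usage sketch (illustrative only): the classic unpublish-then-free
 * pattern for receive-path data; "pt" is a hypothetical entry on an
 * RCU-protected list.
 *
 *	list_del_rcu(&pt->list);
 *	synchronize_net();
 *	kfree(pt);
 */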
Linus Torvalds1da177e2005-04-16 15:20:36 -07005826
5827/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005828 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005829 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005830 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08005831 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005832 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005833 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005834 * If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005835 *
5836 * Callers must hold the rtnl semaphore. You may want
5837 * unregister_netdev() instead of this.
5838 */
5839
Eric Dumazet44a08732009-10-27 07:03:04 +00005840void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005841{
Herbert Xua6620712007-12-12 19:21:56 -08005842 ASSERT_RTNL();
5843
Eric Dumazet44a08732009-10-27 07:03:04 +00005844 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005845 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00005846 } else {
5847 rollback_registered(dev);
5848 /* Finish processing unregister after unlock */
5849 net_set_todo(dev);
5850 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005851}
Eric Dumazet44a08732009-10-27 07:03:04 +00005852EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005853
5854/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005855 * unregister_netdevice_many - unregister many devices
5856 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005857 */
5858void unregister_netdevice_many(struct list_head *head)
5859{
5860 struct net_device *dev;
5861
5862 if (!list_empty(head)) {
5863 rollback_registered_many(head);
5864 list_for_each_entry(dev, head, unreg_list)
5865 net_set_todo(dev);
5866 }
5867}
Eric Dumazet63c80992009-10-27 07:06:49 +00005868EXPORT_SYMBOL(unregister_netdevice_many);
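/* Usage sketch (illustrative only): batching removals so that
 * notifiers and RCU grace periods are shared across devices; the
 * "my_link_ops" selection test is hypothetical.
 *
 *	LIST_HEAD(kill_list);
 *
 *	ASSERT_RTNL();
 *	for_each_netdev(net, dev)
 *		if (dev->rtnl_link_ops == &my_link_ops)
 *			unregister_netdevice_queue(dev, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 */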
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005869
5870/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005871 * unregister_netdev - remove device from the kernel
5872 * @dev: device
5873 *
5874 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005875 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005876 *
5877 * This is just a wrapper for unregister_netdevice that takes
5878 * the rtnl semaphore. In general you want to use this and not
5879 * unregister_netdevice.
5880 */
5881void unregister_netdev(struct net_device *dev)
5882{
5883 rtnl_lock();
5884 unregister_netdevice(dev);
5885 rtnl_unlock();
5886}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005887EXPORT_SYMBOL(unregister_netdev);
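/* Usage sketch (illustrative only): the usual driver remove path pairs
 * this with free_netdev():
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */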
5888
Eric W. Biedermance286d32007-09-12 13:53:49 +02005889/**
5890 * dev_change_net_namespace - move device to a different network namespace
5891 * @dev: device
5892 * @net: network namespace
5893 * @pat: If not NULL name pattern to try if the current device name
5894 * is already taken in the destination network namespace.
5895 *
5896 * This function shuts down a device interface and moves it
5897 * to a new network namespace. On success 0 is returned, on
5898 * a failure a netagive errno code is returned.
5899 *
5900 * Callers must hold the rtnl semaphore.
5901 */
5902
5903int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5904{
Eric W. Biedermance286d32007-09-12 13:53:49 +02005905 int err;
5906
5907 ASSERT_RTNL();
5908
5909 /* Don't allow namespace local devices to be moved. */
5910 err = -EINVAL;
5911 if (dev->features & NETIF_F_NETNS_LOCAL)
5912 goto out;
5913
5914	/* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02005915 if (dev->reg_state != NETREG_REGISTERED)
5916 goto out;
5917
5918	/* Get out if there is nothing to do */
5919 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005920 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005921 goto out;
5922
5923 /* Pick the destination device name, and ensure
5924 * we can use it in the destination network namespace.
5925 */
5926 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00005927 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005928 /* We get here if we can't use the current device name */
5929 if (!pat)
5930 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00005931 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005932 goto out;
5933 }
5934
5935 /*
5936	 * And now a mini version of register_netdevice and unregister_netdevice.
5937 */
5938
5939 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005940 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005941
5942 /* And unlink it from device chain */
5943 err = -ENODEV;
5944 unlist_netdevice(dev);
5945
5946 synchronize_net();
5947
5948 /* Shutdown queueing discipline. */
5949 dev_shutdown(dev);
5950
5951	/* Notify protocols that we are about to destroy
5952	   this device. They should clean up all their state.
David Lamparter3b27e102010-09-17 03:22:19 +00005953
5954 Note that dev->reg_state stays at NETREG_REGISTERED.
5955	   This is intentional: this way 8021q and macvlan know
5956 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02005957 */
5958 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00005959 rcu_barrier();
5960 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric W. Biedermand2237d32011-10-21 06:24:20 +00005961 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005962
5963 /*
5964 * Flush the unicast and multicast chains
5965 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005966 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005967 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005968
Serge Hallyn4e66ae22012-12-03 16:17:12 +00005969 /* Send a netdev-removed uevent to the old namespace */
5970 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
5971
Eric W. Biedermance286d32007-09-12 13:53:49 +02005972 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005973 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005974
Eric W. Biedermance286d32007-09-12 13:53:49 +02005975 /* If there is an ifindex conflict assign a new one */
5976 if (__dev_get_by_index(net, dev->ifindex)) {
5977 int iflink = (dev->iflink == dev->ifindex);
5978 dev->ifindex = dev_new_index(net);
5979 if (iflink)
5980 dev->iflink = dev->ifindex;
5981 }
5982
Serge Hallyn4e66ae22012-12-03 16:17:12 +00005983 /* Send a netdev-add uevent to the new namespace */
5984 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
5985
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005986 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07005987 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005988 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005989
5990 /* Add the device back in the hashes */
5991 list_netdevice(dev);
5992
5993 /* Notify protocols, that a new device appeared. */
5994 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5995
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005996 /*
5997 * Prevent userspace races by waiting until the network
5998	 * device is fully set up before sending notifications.
5999 */
6000 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6001
Eric W. Biedermance286d32007-09-12 13:53:49 +02006002 synchronize_net();
6003 err = 0;
6004out:
6005 return err;
6006}
Johannes Berg463d0182009-07-14 00:33:35 +02006007EXPORT_SYMBOL_GPL(dev_change_net_namespace);
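/* Usage sketch (illustrative only): moving a device into another
 * namespace with a fallback name pattern in case of a collision;
 * "newnet" is hypothetical.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, newnet, "eth%d");
 *	rtnl_unlock();
 */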
Eric W. Biedermance286d32007-09-12 13:53:49 +02006008
Linus Torvalds1da177e2005-04-16 15:20:36 -07006009static int dev_cpu_callback(struct notifier_block *nfb,
6010 unsigned long action,
6011 void *ocpu)
6012{
6013 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006014 struct sk_buff *skb;
6015 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6016 struct softnet_data *sd, *oldsd;
6017
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006018 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006019 return NOTIFY_OK;
6020
6021 local_irq_disable();
6022 cpu = smp_processor_id();
6023 sd = &per_cpu(softnet_data, cpu);
6024 oldsd = &per_cpu(softnet_data, oldcpu);
6025
6026 /* Find end of our completion_queue. */
6027 list_skb = &sd->completion_queue;
6028 while (*list_skb)
6029 list_skb = &(*list_skb)->next;
6030 /* Append completion queue from offline CPU. */
6031 *list_skb = oldsd->completion_queue;
6032 oldsd->completion_queue = NULL;
6033
Linus Torvalds1da177e2005-04-16 15:20:36 -07006034 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00006035 if (oldsd->output_queue) {
6036 *sd->output_queue_tailp = oldsd->output_queue;
6037 sd->output_queue_tailp = oldsd->output_queue_tailp;
6038 oldsd->output_queue = NULL;
6039 oldsd->output_queue_tailp = &oldsd->output_queue;
6040 }
Heiko Carstens264524d2011-06-06 20:50:03 +00006041 /* Append NAPI poll list from offline CPU. */
6042 if (!list_empty(&oldsd->poll_list)) {
6043 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6044 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6045 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006046
6047 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6048 local_irq_enable();
6049
6050 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00006051 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6052 netif_rx(skb);
6053 input_queue_head_incr(oldsd);
6054 }
Tom Herbertfec5e652010-04-16 16:01:27 -07006055 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006056 netif_rx(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00006057 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07006058 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006059
6060 return NOTIFY_OK;
6061}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006062
6063
Herbert Xu7f353bf2007-08-10 15:47:58 -07006064/**
Herbert Xub63365a2008-10-23 01:11:29 -07006065 * netdev_increment_features - increment feature set by one
6066 * @all: current feature set
6067 * @one: new feature set
6068 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07006069 *
6070 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07006071 * @one to the master device with current feature set @all. Will not
6072 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07006073 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006074netdev_features_t netdev_increment_features(netdev_features_t all,
6075 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07006076{
Michał Mirosław1742f182011-04-22 06:31:16 +00006077 if (mask & NETIF_F_GEN_CSUM)
6078 mask |= NETIF_F_ALL_CSUM;
6079 mask |= NETIF_F_VLAN_CHALLENGED;
6080
6081 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6082 all &= one | ~NETIF_F_ALL_FOR_ALL;
6083
Michał Mirosław1742f182011-04-22 06:31:16 +00006084 /* If one device supports hw checksumming, set for all. */
6085 if (all & NETIF_F_GEN_CSUM)
6086 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006087
6088 return all;
6089}
Herbert Xub63365a2008-10-23 01:11:29 -07006090EXPORT_SYMBOL(netdev_increment_features);
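/* Usage sketch (illustrative only): a master device (bonding-style)
 * folding each lower device's feature set into its own advertised set;
 * the iteration, "mask" and starting set are hypothetical.
 *
 *	netdev_features_t features = 0;
 *
 *	list_for_each_entry(slave, &lowers, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 *	master->vlan_features = features;
 */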
Herbert Xu7f353bf2007-08-10 15:47:58 -07006091
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006092static struct hlist_head *netdev_create_hash(void)
6093{
6094 int i;
6095 struct hlist_head *hash;
6096
6097 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6098 if (hash != NULL)
6099 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6100 INIT_HLIST_HEAD(&hash[i]);
6101
6102 return hash;
6103}
6104
Eric W. Biederman881d9662007-09-17 11:56:21 -07006105/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07006106static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006107{
Rustad, Mark D734b6542012-07-18 09:06:07 +00006108 if (net != &init_net)
6109 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07006110
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006111 net->dev_name_head = netdev_create_hash();
6112 if (net->dev_name_head == NULL)
6113 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006114
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006115 net->dev_index_head = netdev_create_hash();
6116 if (net->dev_index_head == NULL)
6117 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006118
6119 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006120
6121err_idx:
6122 kfree(net->dev_name_head);
6123err_name:
6124 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006125}
6126
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006127/**
6128 * netdev_drivername - network driver for the device
6129 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006130 *
6131 * Determine network driver for device.
6132 */
David S. Miller3019de12011-06-06 16:41:33 -07006133const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07006134{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07006135 const struct device_driver *driver;
6136 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07006137 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07006138
6139 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006140 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07006141 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006142
6143 driver = parent->driver;
6144 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07006145 return driver->name;
6146 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006147}
6148
Joe Perchesb004ff42012-09-12 20:12:19 -07006149static int __netdev_printk(const char *level, const struct net_device *dev,
Joe Perches256df2f2010-06-27 01:02:35 +00006150 struct va_format *vaf)
6151{
6152 int r;
6153
Joe Perchesb004ff42012-09-12 20:12:19 -07006154 if (dev && dev->dev.parent) {
Joe Perches666f3552012-09-12 20:14:11 -07006155 r = dev_printk_emit(level[1] - '0',
6156 dev->dev.parent,
6157 "%s %s %s: %pV",
6158 dev_driver_string(dev->dev.parent),
6159 dev_name(dev->dev.parent),
6160 netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006161 } else if (dev) {
Joe Perches256df2f2010-06-27 01:02:35 +00006162 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006163 } else {
Joe Perches256df2f2010-06-27 01:02:35 +00006164 r = printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006165 }
Joe Perches256df2f2010-06-27 01:02:35 +00006166
6167 return r;
6168}
6169
6170int netdev_printk(const char *level, const struct net_device *dev,
6171 const char *format, ...)
6172{
6173 struct va_format vaf;
6174 va_list args;
6175 int r;
6176
6177 va_start(args, format);
6178
6179 vaf.fmt = format;
6180 vaf.va = &args;
6181
6182 r = __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006183
Joe Perches256df2f2010-06-27 01:02:35 +00006184 va_end(args);
6185
6186 return r;
6187}
6188EXPORT_SYMBOL(netdev_printk);
6189
6190#define define_netdev_printk_level(func, level) \
6191int func(const struct net_device *dev, const char *fmt, ...) \
6192{ \
6193 int r; \
6194 struct va_format vaf; \
6195 va_list args; \
6196 \
6197 va_start(args, fmt); \
6198 \
6199 vaf.fmt = fmt; \
6200 vaf.va = &args; \
6201 \
6202 r = __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07006203 \
Joe Perches256df2f2010-06-27 01:02:35 +00006204 va_end(args); \
6205 \
6206 return r; \
6207} \
6208EXPORT_SYMBOL(func);
6209
6210define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6211define_netdev_printk_level(netdev_alert, KERN_ALERT);
6212define_netdev_printk_level(netdev_crit, KERN_CRIT);
6213define_netdev_printk_level(netdev_err, KERN_ERR);
6214define_netdev_printk_level(netdev_warn, KERN_WARNING);
6215define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6216define_netdev_printk_level(netdev_info, KERN_INFO);
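/* Usage sketch (illustrative only): these helpers prefix the message
 * with the driver, bus and interface name, so callers just write e.g.
 *
 *	netdev_err(dev, "TX timeout, resetting (status %#x)\n", status);
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 */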
6217
Pavel Emelyanov46650792007-10-08 20:38:39 -07006218static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006219{
6220 kfree(net->dev_name_head);
6221 kfree(net->dev_index_head);
6222}
6223
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006224static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07006225 .init = netdev_init,
6226 .exit = netdev_exit,
6227};
6228
Pavel Emelyanov46650792007-10-08 20:38:39 -07006229static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006230{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006231 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02006232 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006233 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02006234 * initial network namespace
6235 */
6236 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006237 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006238 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006239 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02006240
6241 /* Ignore unmoveable devices (i.e. loopback) */
6242 if (dev->features & NETIF_F_NETNS_LOCAL)
6243 continue;
6244
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006245 /* Leave virtual devices for the generic cleanup */
6246 if (dev->rtnl_link_ops)
6247 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08006248
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006249 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006250 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6251 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006252 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006253 pr_emerg("%s: failed to move %s to init_net: %d\n",
6254 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006255 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02006256 }
6257 }
6258 rtnl_unlock();
6259}
6260
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006261static void __net_exit default_device_exit_batch(struct list_head *net_list)
6262{
6263	/* At exit all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04006264 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006265 * Do this across as many network namespaces as possible to
6266 * improve batching efficiency.
6267 */
6268 struct net_device *dev;
6269 struct net *net;
6270 LIST_HEAD(dev_kill_list);
6271
6272 rtnl_lock();
6273 list_for_each_entry(net, net_list, exit_list) {
6274 for_each_netdev_reverse(net, dev) {
6275 if (dev->rtnl_link_ops)
6276 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6277 else
6278 unregister_netdevice_queue(dev, &dev_kill_list);
6279 }
6280 }
6281 unregister_netdevice_many(&dev_kill_list);
Eric Dumazetceaaec92011-02-17 22:59:19 +00006282 list_del(&dev_kill_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006283 rtnl_unlock();
6284}
6285
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006286static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006287 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006288 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02006289};
6290
Linus Torvalds1da177e2005-04-16 15:20:36 -07006291/*
6292 * Initialize the DEV module. At boot time this walks the device list and
6293 * unhooks any devices that fail to initialise (normally hardware not
6294 * present) and leaves us with a valid list of present and active devices.
6295 *
6296 */
6297
6298/*
6299 * This is called single threaded during boot, so no need
6300 * to take the rtnl semaphore.
6301 */
6302static int __init net_dev_init(void)
6303{
6304 int i, rc = -ENOMEM;
6305
6306 BUG_ON(!dev_boot_phase);
6307
Linus Torvalds1da177e2005-04-16 15:20:36 -07006308 if (dev_proc_init())
6309 goto out;
6310
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006311 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07006312 goto out;
6313
6314 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08006315 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006316 INIT_LIST_HEAD(&ptype_base[i]);
6317
Vlad Yasevich62532da2012-11-15 08:49:10 +00006318 INIT_LIST_HEAD(&offload_base);
6319
Eric W. Biederman881d9662007-09-17 11:56:21 -07006320 if (register_pernet_subsys(&netdev_net_ops))
6321 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006322
6323 /*
6324 * Initialise the packet receive queues.
6325 */
6326
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07006327 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006328 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006329
Changli Gaodee42872010-05-02 05:42:16 +00006330 memset(sd, 0, sizeof(*sd));
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006331 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07006332 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006333 sd->completion_queue = NULL;
6334 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00006335 sd->output_queue = NULL;
6336 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00006337#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006338 sd->csd.func = rps_trigger_softirq;
6339 sd->csd.info = sd;
6340 sd->csd.flags = 0;
6341 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07006342#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00006343
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006344 sd->backlog.poll = process_backlog;
6345 sd->backlog.weight = weight_p;
6346 sd->backlog.gro_list = NULL;
6347 sd->backlog.gro_count = 0;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00006348
6349#ifdef CONFIG_NET_FLOW_LIMIT
6350 sd->flow_limit = NULL;
6351#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006352 }
6353
Linus Torvalds1da177e2005-04-16 15:20:36 -07006354 dev_boot_phase = 0;
6355
Eric W. Biederman505d4f72008-11-07 22:54:20 -08006356	/* The loopback device is special: if any other network device
6357	 * is present in a network namespace, the loopback device must
6358	 * be present too. Since we now dynamically allocate and free the
6359	 * loopback device, ensure this invariant is maintained by
6360	 * keeping the loopback device as the first device on the
6361	 * list of network devices. This ensures the loopback device
6362	 * is the first device that appears and the last network device
6363	 * that disappears.
6364 */
6365 if (register_pernet_device(&loopback_net_ops))
6366 goto out;
6367
6368 if (register_pernet_device(&default_device_ops))
6369 goto out;
6370
Carlos R. Mafra962cf362008-05-15 11:15:37 -03006371 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6372 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006373
6374 hotcpu_notifier(dev_cpu_callback, 0);
6375 dst_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006376 rc = 0;
6377out:
6378 return rc;
6379}
6380
6381subsys_initcall(net_dev_init);