/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or take rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * For example usages, see register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation: if a protocol handler that mangles packets were
 *	first on the list, it could not sense that the packet is cloned
 *	and must be copied-on-write; it would change the packet and
 *	subsequent readers would see a broken packet.
 *						--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

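/*
 * Illustrative usage sketch (not part of this file; "my_tap" and
 * "my_tap_rcv" are hypothetical names): a tap registered with
 * dev_add_pack(). ETH_P_ALL places it on the ptype_all chain, so it
 * sees every packet; the handler owns the skb it is handed and must
 * free it.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);		(typically from module init;
 *	dev_remove_pack(&my_tap) on exit)
 */
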
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are found.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

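/*
 * Illustrative example (hypothetical values): given the parsing above,
 * a kernel command line of
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * records irq 9 and I/O base 0x300 for the device that will later
 * register as "eth0"; netdev_boot_setup_check() applies the saved
 * settings at probe time.
 */
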
/*******************************************************************************

			Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

670/**
Eric Dumazet72c95282009-10-30 07:11:27 +0000671 * dev_get_by_name_rcu - find a device by its name
672 * @net: the applicable net namespace
673 * @name: name to find
674 *
675 * Find an interface by name.
676 * If the name is found a pointer to the device is returned.
677 * If the name is not found then %NULL is returned.
678 * The reference counters are not incremented so the caller must be
679 * careful with locks. The caller must hold RCU lock.
680 */
681
682struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
683{
Eric Dumazet72c95282009-10-30 07:11:27 +0000684 struct net_device *dev;
685 struct hlist_head *head = dev_name_hash(net, name);
686
Sasha Levinb67bfe02013-02-27 17:06:00 -0800687 hlist_for_each_entry_rcu(dev, head, name_hlist)
Eric Dumazet72c95282009-10-30 07:11:27 +0000688 if (!strncmp(dev->name, name, IFNAMSIZ))
689 return dev;
690
691 return NULL;
692}
693EXPORT_SYMBOL(dev_get_by_name_rcu);
694
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

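/*
 * Illustrative sketch of the three name-lookup disciplines above,
 * using a hypothetical "eth0":
 *
 *	Under RTNL or dev_base_lock, no reference taken:
 *		dev = __dev_get_by_name(net, "eth0");
 *
 *	Under RCU, pointer valid only inside the read-side section:
 *		rcu_read_lock();
 *		dev = dev_get_by_name_rcu(net, "eth0");
 *		rcu_read_unlock();
 *
 *	Any context; reference must be dropped with dev_put():
 *		dev = dev_get_by_name(net, "eth0");
 *		if (dev)
 *			dev_put(dev);
 */
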
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

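/*
 * Illustrative sketch: looking up a device by MAC under RCU. The
 * address bytes are arbitrary example data.
 *
 *	const unsigned char mac[ETH_ALEN] =
 *		{ 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		netdev_info(dev, "found by hw address\n");
 *	rcu_read_unlock();
 */
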
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

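/*
 * Illustrative sketch: finding any interface that is currently up.
 * The pair means "of the bits selected by @mask, require the values
 * given in @if_flags".
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_flags_rcu(net, IFF_UP, IFF_UP);
 *	if (dev)
 *		pr_info("%s is up\n", dev->name);
 *	rcu_read_unlock();
 */
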
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

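/*
 * Examples: "eth0", "wlan-guest" and "tap.1" are accepted; "" (empty),
 * ".", "..", "a/b", "eth 0" (whitespace) and any name of IFNAMSIZ or
 * more characters are rejected.
 */
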
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

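/*
 * Illustrative sketch (hypothetical format "myeth%d"): letting the
 * core pick the unit number. On success dev->name holds e.g. "myeth0"
 * (the first free slot) and the unit number is returned.
 *
 *	err = dev_alloc_name(dev, "myeth%d");
 *	if (err < 0)
 *		goto fail;
 */
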
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device; format strings such as "eth%d" may be
 *	passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

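/*
 * Illustrative sketch: renaming a (down) device under RTNL; a "%d"
 * wildcard is resolved as in dev_alloc_name().
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "lan%d");
 *	rtnl_unlock();
 */
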
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}

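/*
 * Illustrative sketch: setting and clearing an alias under RTNL. The
 * string is a hypothetical example; passing len == 0 frees the alias.
 *
 *	rtnl_lock();
 *	err = dev_set_alias(dev, "uplink to core switch", 21);
 *	...
 *	err = dev_set_alias(dev, NULL, 0);
 *	rtnl_unlock();
 */
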
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	ret = netpoll_rx_disable(dev);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_rx_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
Patrick McHardybd380812010-02-26 06:34:53 +00001232
1233/**
1234 * dev_open - prepare an interface for use.
1235 * @dev: device to open
1236 *
1237 * Takes a device from down to up state. The device's private open
1238 * function is invoked and then the multicast lists are loaded. Finally
1239 * the device is moved into the up state and a %NETDEV_UP message is
1240 * sent to the netdev notifier chain.
1241 *
1242 * Calling this function on an active interface is a nop. On a failure
1243 * a negative errno code is returned.
1244 */
1245int dev_open(struct net_device *dev)
1246{
1247 int ret;
1248
Patrick McHardybd380812010-02-26 06:34:53 +00001249 if (dev->flags & IFF_UP)
1250 return 0;
1251
Patrick McHardybd380812010-02-26 06:34:53 +00001252 ret = __dev_open(dev);
1253 if (ret < 0)
1254 return ret;
1255
Patrick McHardybd380812010-02-26 06:34:53 +00001256 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1257 call_netdevice_notifiers(NETDEV_UP, dev);
1258
1259 return ret;
1260}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001261EXPORT_SYMBOL(dev_open);
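
/*
 * Example (illustrative sketch, not part of this file): dev_open()
 * must run under the RTNL lock, so a hypothetical control path that
 * is not already on an rtnl-held path would wrap it like this:
 *
 *	static int my_bring_up(struct net_device *dev)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_open(dev);
 *		rtnl_unlock();
 *		return err;
 *	}
 *
 * Calling it on a device that is already IFF_UP simply returns 0.
 */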

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch the poll
		 * list, it can even be on a different cpu. So just clear
		 * netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device-specific close. This cannot fail and is
		 * only done if the device is UP.
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	/* Temporarily disable netpoll until the interface is down */
	retval = netpoll_rx_disable(dev);
	if (retval)
		return retval;

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	netpoll_rx_enable(dev);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
int dev_close(struct net_device *dev)
{
	int ret = 0;

	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		/* Block netpoll rx while the interface is going down */
		ret = netpoll_rx_disable(dev);
		if (ret)
			return ret;

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);

		netpoll_rx_enable(dev);
	}
	return ret;
}
EXPORT_SYMBOL(dev_close);
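
/*
 * Example (illustrative sketch, not part of this file): pairing
 * dev_close() with dev_open() gives a simple "bounce" of an interface,
 * again under RTNL. The my_restart_dev() name is hypothetical:
 *
 *	static int my_restart_dev(struct net_device *dev)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		dev_close(dev);
 *		err = dev_open(dev);
 *		rtnl_unlock();
 *		return err;
 *	}
 */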


/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device. Must be
 * called under RTNL. This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable LRO on a vlan device,
	 * use the underlying physical device instead.
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);
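
/*
 * Example (illustrative sketch, not part of this file): forwarding
 * setups must not feed LRO-merged frames to another device, so code
 * that enslaves @dev or starts forwarding from it would do, while
 * already holding RTNL:
 *
 *	ASSERT_RTNL();
 *	dev_disable_lro(dev);
 *
 * This is similar in spirit to what bridging and bonding do when a
 * port is added.
 */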


static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed to the
 * new notifier to allow the notifier to have a race-free view of the
 * network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
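
/*
 * Example (illustrative sketch, not part of this file): a module that
 * wants to track device events would typically do something like the
 * following; all my_* names are hypothetical. In this kernel the
 * notifier's data pointer is the net_device itself.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP || event == NETDEV_DOWN)
 *			pr_info("%s: event %lu\n", dev->name, event);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 * The module would call register_netdevice_notifier(&my_netdev_nb) at
 * init time and unregister_netdevice_notifier(&my_netdev_nb) at exit.
 */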

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
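
/*
 * Example (illustrative sketch, not part of this file): the two calls
 * above are reference counted and must be balanced. A packet-capture
 * style module would typically call:
 *
 *	net_enable_timestamp();		when the first tap is opened
 *	net_disable_timestamp();	when the last tap is closed
 *
 * While the count is non-zero, net_timestamp_set() below actually
 * stamps skbs; otherwise it only clears the timestamp.
 */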

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented beforehand
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->skb_iif = 0;
	skb_dst_drop(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
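
/*
 * Example (illustrative sketch, not part of this file): a veth-like
 * pair device could implement its transmit routine by forwarding to
 * its peer; the my_pair_* names are hypothetical.
 *
 *	static netdev_tx_t my_pair_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = my_pair_peer(dev);
 *		unsigned int len = skb->len;
 *
 *		if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS) {
 *			dev->stats.tx_packets++;
 *			dev->stats.tx_bytes += len;
 *		}
 *		return NETDEV_TX_OK;
 *	}
 *
 * The length is sampled before the call because dev_forward_skb()
 * consumes the skb whether or not it succeeds.
 */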

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 * Support routine. Sends outgoing frames to any network
 * taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly set by the sender,
			 * so that the second statement is just protection
			 * against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this, verify that the tc mapping remains valid and,
 * if not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid and nothing can be done, so disable priority mappings. It
 * is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
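
/*
 * Example (illustrative sketch, not part of this file): a multiqueue
 * driver that wants tx queue @i serviced only by the CPUs of NUMA
 * node @node could set its XPS map like this; node, i and the error
 * handling are hypothetical.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(mask, cpumask_of_node(node));
 *	err = netif_set_xps_queue(dev, mask, i);
 *	free_cpumask_var(mask);
 *
 * The same policy can be set from user space via
 * /sys/class/net/<dev>/queues/tx-<i>/xps_cpus, which also ends up in
 * netif_set_xps_queue().
 */
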
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
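
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * allocated its net_device with, say, 16 tx queues but negotiated
 * fewer with firmware would trim the active set, either before
 * registration or later under RTNL; nr_hw_queues is hypothetical.
 *
 *	err = netif_set_real_num_tx_queues(dev, nr_hw_queues);
 *	if (err)
 *		goto fail;
 *
 * As the code above shows, shrinking the count resets the qdiscs and
 * XPS maps of the queues that fall out of use.
 */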

#ifdef CONFIG_RPS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
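
/*
 * Example (illustrative sketch, not part of this file): drivers
 * commonly size their rx side with the helper above and then report
 * the active count, e.g. during probe; my_hw_max_rx_queues is a
 * hypothetical per-device limit.
 *
 *	int nq = min_t(int, netif_get_num_default_rss_queues(),
 *		       my_hw_max_rx_queues);
 *
 *	err = netif_set_real_num_rx_queues(dev, nq);
 */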

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
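
/*
 * Example (illustrative sketch, not part of this file): tx-completion
 * handlers may run in hardirq context, where plain dev_kfree_skb()
 * is not allowed; dev_kfree_skb_any() picks the safe variant. The
 * my_* helpers are hypothetical.
 *
 *	static void my_clean_tx_ring(struct my_ring *ring)
 *	{
 *		while (my_ring_has_completed(ring))
 *			dev_kfree_skb_any(my_ring_pop(ring));
 *	}
 */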


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
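
/*
 * Example (illustrative sketch, not part of this file): PCI drivers
 * typically bracket suspend/resume with these helpers so the stack
 * stops handing them packets while the hardware is powered down. The
 * my_suspend() name is hypothetical.
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 * The resume path then calls netif_device_attach(dev) once the
 * hardware is functional again.
 */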

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (!net_ratelimit())
		return;

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity: the checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
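
/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * hardware cannot checksum a particular packet can fall back to this
 * helper from its xmit path; my_hw_can_csum() is a hypothetical
 * capability test.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !my_hw_can_csum(skb)) {
 *		if (skb_checksum_help(skb))
 *			goto drop;
 *	}
 */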

__be16 skb_network_protocol(struct sk_buff *skb)
{
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return 0;

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	return type;
}

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	__be16 type = skb_network_protocol(skb);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, skb->mac_len);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				int err;

				err = ptype->callbacks.gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
2281
2282
Cong Wang12b00042013-02-05 16:36:38 +00002283/* openvswitch calls this on rx path, so we need a different check.
2284 */
2285static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2286{
2287 if (tx_path)
2288 return skb->ip_summed != CHECKSUM_PARTIAL;
2289 else
2290 return skb->ip_summed == CHECKSUM_NONE;
2291}
2292
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002293/**
Cong Wang12b00042013-02-05 16:36:38 +00002294 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002295 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002296 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002297 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002298 *
2299 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002300 *
2301 * It may return NULL if the skb requires no segmentation. This is
2302 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002303 */
Cong Wang12b00042013-02-05 16:36:38 +00002304struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2305 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002306{
Cong Wang12b00042013-02-05 16:36:38 +00002307 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002308 int err;
2309
Ben Hutchings36c92472012-01-17 07:57:56 +00002310 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002311
Herbert Xua430a432006-07-08 13:34:56 -07002312 if (skb_header_cloned(skb) &&
2313 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2314 return ERR_PTR(err);
2315 }
2316
Pravin B Shelar68c33162013-02-14 14:02:41 +00002317 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002318 skb_reset_mac_header(skb);
2319 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002320
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002321 return skb_mac_gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002322}
Cong Wang12b00042013-02-05 16:36:38 +00002323EXPORT_SYMBOL(__skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. An IOMMU is present and allows mapping all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));

			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 * dev_gso_segment - Perform emulated hardware segmentation on skb.
 * @skb: buffer to segment
 * @features: device features as applicable to this skb
 *
 * This function segments the given skb and stores the list of segments
 * in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

static netdev_features_t harmonize_features(struct sk_buff *skb,
	__be16 protocol, netdev_features_t features)
{
	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, protocol)) {
		features &= ~NETIF_F_ALL_CSUM;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	netdev_features_t features = skb->dev->features;

	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
		features &= ~NETIF_F_GSO_MASK;

	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, protocol, features);
	}

	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);

	if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
		return harmonize_features(skb, protocol, features);
	} else {
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_STAG_TX;
		return harmonize_features(skb, protocol, features);
	}
}
EXPORT_SYMBOL(netif_skb_features);

/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
			((skb_has_frag_list(skb) &&
				!(features & NETIF_F_FRAGLIST)) ||
			(skb_shinfo(skb)->nr_frags &&
				!(features & NETIF_F_SG)));
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;
	unsigned int skb_len;

	if (likely(!skb->next)) {
		netdev_features_t features;

		/*
		 * If the device doesn't need skb->dst, release it right now
		 * while it's hot in this cpu's cache.
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		features = netif_skb_features(skb);

		if (vlan_tx_tag_present(skb) &&
		    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
			skb = __vlan_put_tag(skb, skb->vlan_proto,
					     vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}

		/* If this is an encapsulation offload request, verify we are
		 * testing hardware encapsulation features instead of standard
		 * features for the netdev.
		 */
		if (skb->encapsulation)
			features &= dev->hw_enc_features;

		if (netif_needs_gso(skb, features)) {
			if (unlikely(dev_gso_segment(skb, features)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, features) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (skb->encapsulation)
					skb_set_inner_transport_header(skb,
						skb_checksum_start_offset(skb));
				else
					skb_set_transport_header(skb,
						skb_checksum_start_offset(skb));
				if (!(features & NETIF_F_ALL_CSUM) &&
				    skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		skb_len = skb->len;
		rc = ops->ndo_start_xmit(skb, dev);
		trace_net_dev_xmit(skb, rc, dev, skb_len);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(nskb, dev);

		skb_len = nskb->len;
		rc = ops->ndo_start_xmit(nskb, dev);
		trace_net_dev_xmit(nskb, rc, dev, skb_len);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_xmit_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL)) {
		skb->destructor = DEV_GSO_CB(skb)->destructor;
		consume_skb(skb);
		return rc;
	}
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}
2588
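/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 * dev_hard_start_xmit() above lands in the driver's ndo_start_xmit(),
 * whose contract is to return NETDEV_TX_OK once it has taken ownership
 * of the skb, or NETDEV_TX_BUSY without freeing it so the core can
 * requeue. All foo_* names below are made up:
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (foo_tx_ring_full(priv)) {
 *			netif_stop_queue(dev);
 *			return NETDEV_TX_BUSY;
 *		}
 *		foo_post_tx_descriptor(priv, skb);
 *		return NETDEV_TX_OK;
 *	}
 */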
Eric Dumazet1def9232013-01-10 12:36:42 +00002589static void qdisc_pkt_len_init(struct sk_buff *skb)
2590{
2591 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2592
2593 qdisc_skb_cb(skb)->pkt_len = skb->len;
2594
2595	/* To get a more precise estimate of the bytes sent on the wire,
2596	 * we add the header size of every segment to pkt_len
2597	 */
2598 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08002599 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00002600 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00002601
Eric Dumazet757b8b12013-01-15 21:14:21 -08002602 /* mac layer + network layer */
2603 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2604
2605 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00002606 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2607 hdr_len += tcp_hdrlen(skb);
2608 else
2609 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00002610
2611 if (shinfo->gso_type & SKB_GSO_DODGY)
2612 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2613 shinfo->gso_size);
2614
2615 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00002616 }
2617}
2618
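/*
 * Worked example for qdisc_pkt_len_init() above (numbers assumed): a
 * TCP GSO skb with skb->len = 7000, gso_size = 1448 and 54 bytes of
 * Ethernet + IPv4 + TCP headers carries
 * DIV_ROUND_UP(7000 - 54, 1448) = 5 segments on the wire, so pkt_len
 * becomes 7000 + (5 - 1) * 54 = 7216, accounting for the four extra
 * header copies that segmentation will add.
 */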
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002619static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2620 struct net_device *dev,
2621 struct netdev_queue *txq)
2622{
2623 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002624 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002625 int rc;
2626
Eric Dumazet1def9232013-01-10 12:36:42 +00002627 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002628 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002629 /*
2630 * Heuristic to force contended enqueues to serialize on a
2631	 * separate lock before trying to get the qdisc main lock.
2632	 * This permits the __QDISC_STATE_RUNNING owner to get the lock
2633	 * more often and dequeue packets faster.
2634 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002635 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002636 if (unlikely(contended))
2637 spin_lock(&q->busylock);
2638
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002639 spin_lock(root_lock);
2640 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2641 kfree_skb(skb);
2642 rc = NET_XMIT_DROP;
2643 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002644 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002645 /*
2646 * This is a work-conserving queue; there are no old skbs
2647 * waiting to be sent out; and the qdisc is not running -
2648 * xmit the skb directly.
2649 */
Eric Dumazet7fee2262010-05-11 23:19:48 +00002650 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2651 skb_dst_force(skb);
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002652
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002653 qdisc_bstats_update(q, skb);
2654
Eric Dumazet79640a42010-06-02 05:09:29 -07002655 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2656 if (unlikely(contended)) {
2657 spin_unlock(&q->busylock);
2658 contended = false;
2659 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002660 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002661 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002662 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002663
2664 rc = NET_XMIT_SUCCESS;
2665 } else {
Eric Dumazet7fee2262010-05-11 23:19:48 +00002666 skb_dst_force(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002667 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002668 if (qdisc_run_begin(q)) {
2669 if (unlikely(contended)) {
2670 spin_unlock(&q->busylock);
2671 contended = false;
2672 }
2673 __qdisc_run(q);
2674 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002675 }
2676 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002677 if (unlikely(contended))
2678 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002679 return rc;
2680}
2681
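/*
 * Note on __dev_xmit_skb() above: the TCQ_F_CAN_BYPASS branch is the
 * common TX fast path. For a work-conserving qdisc that is empty and
 * not already running, the skb goes straight to sch_direct_xmit() and
 * is never enqueued, saving one enqueue/dequeue round trip per packet.
 */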
Neil Horman5bc14212011-11-22 05:10:51 +00002682#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2683static void skb_update_prio(struct sk_buff *skb)
2684{
Igor Maravic6977a792011-11-25 07:44:54 +00002685 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002686
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002687 if (!skb->priority && skb->sk && map) {
2688 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2689
2690 if (prioidx < map->priomap_len)
2691 skb->priority = map->priomap[prioidx];
2692 }
Neil Horman5bc14212011-11-22 05:10:51 +00002693}
2694#else
2695#define skb_update_prio(skb)
2696#endif
2697
Eric Dumazet745e20f2010-09-29 13:23:09 -07002698static DEFINE_PER_CPU(int, xmit_recursion);
David S. Miller11a766c2010-10-25 12:51:55 -07002699#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002700
Dave Jonesd29f7492008-07-22 14:09:06 -07002701/**
Michel Machado95603e22012-06-12 10:16:35 +00002702 * dev_loopback_xmit - loop back @skb
2703 * @skb: buffer to transmit
2704 */
2705int dev_loopback_xmit(struct sk_buff *skb)
2706{
2707 skb_reset_mac_header(skb);
2708 __skb_pull(skb, skb_network_offset(skb));
2709 skb->pkt_type = PACKET_LOOPBACK;
2710 skb->ip_summed = CHECKSUM_UNNECESSARY;
2711 WARN_ON(!skb_dst(skb));
2712 skb_dst_force(skb);
2713 netif_rx_ni(skb);
2714 return 0;
2715}
2716EXPORT_SYMBOL(dev_loopback_xmit);
2717
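/*
 * Usage sketch for dev_loopback_xmit() above (an assumption about the
 * typical caller, not taken from this file): the IPv4/IPv6 output
 * paths feed a clone of an outgoing multicast packet back to local
 * listeners roughly like so:
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		dev_loopback_xmit(nskb);
 *
 * The clone re-enters the stack via netif_rx_ni(), marked as
 * PACKET_LOOPBACK with its checksum already accepted.
 */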
2718/**
Dave Jonesd29f7492008-07-22 14:09:06 -07002719 * dev_queue_xmit - transmit a buffer
2720 * @skb: buffer to transmit
2721 *
2722 * Queue a buffer for transmission to a network device. The caller must
2723 * have set the device and priority and built the buffer before calling
2724 * this function. The function can be called from an interrupt.
2725 *
2726 * A negative errno code is returned on a failure. A success does not
2727 * guarantee the frame will be transmitted as it may be dropped due
2728 * to congestion or traffic shaping.
2729 *
2730 * -----------------------------------------------------------------------------------
2731 * I notice this method can also return errors from the queue disciplines,
2732 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2733 * be positive.
2734 *
2735 * Regardless of the return value, the skb is consumed, so it is currently
2736 * difficult to retry a send to this method. (You can bump the ref count
2737 * before sending to hold a reference for retry if you are careful.)
2738 *
2739 * When calling this method, interrupts MUST be enabled. This is because
2740 * the BH enable code must have IRQs enabled so that it will not deadlock.
2741 * --BLG
2742 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743int dev_queue_xmit(struct sk_buff *skb)
2744{
2745 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002746 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747 struct Qdisc *q;
2748 int rc = -ENOMEM;
2749
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00002750 skb_reset_mac_header(skb);
2751
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002752 /* Disable soft irqs for various locks below. Also
2753 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002755 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756
Neil Horman5bc14212011-11-22 05:10:51 +00002757 skb_update_prio(skb);
2758
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002759 txq = netdev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002760 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002761
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002763 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002765 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002767 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002768 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 }
2770
2771	/* The device has no queue. Common case for software devices:
2772	   loopback, all sorts of tunnels...
2773
Herbert Xu932ff272006-06-09 12:20:56 -07002774	   Really, it is unlikely that netif_tx_lock protection is necessary
2775	   here. (f.e. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776	   counters.)
2777	   However, it is possible that they rely on the protection
2778	   made by us here.
2779
2780	   Check this and take the lock. It is not prone to deadlocks.
2781	   Either way, the noqueue qdisc case is even simpler 8)
2782	 */
2783 if (dev->flags & IFF_UP) {
2784 int cpu = smp_processor_id(); /* ok because BHs are off */
2785
David S. Millerc773e842008-07-08 23:13:53 -07002786 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787
Eric Dumazet745e20f2010-09-29 13:23:09 -07002788 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2789 goto recursion_alert;
2790
David S. Millerc773e842008-07-08 23:13:53 -07002791 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792
Tom Herbert734664982011-11-28 16:32:44 +00002793 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002794 __this_cpu_inc(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002795 rc = dev_hard_start_xmit(skb, dev, txq);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002796 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002797 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002798 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 goto out;
2800 }
2801 }
David S. Millerc773e842008-07-08 23:13:53 -07002802 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00002803 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2804 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 } else {
2806 /* Recursion is detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07002807 * unfortunately
2808 */
2809recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00002810 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2811 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 }
2813 }
2814
2815 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002816 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818 kfree_skb(skb);
2819 return rc;
2820out:
Herbert Xud4828d82006-06-22 02:28:18 -07002821 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822 return rc;
2823}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002824EXPORT_SYMBOL(dev_queue_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825
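/*
 * Caller sketch (hypothetical module code, error handling elided): as
 * the comment above says, the caller builds the complete frame and
 * sets skb->dev before calling dev_queue_xmit(). payload, len, dev and
 * dest_mac are assumed to exist in the caller:
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	memcpy(skb_put(skb, len), payload, len);
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);
 *	dev_hard_header(skb, dev, ETH_P_IP, dest_mac, dev->dev_addr,
 *			skb->len);
 *	dev_queue_xmit(skb);
 */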
2826
2827/*=======================================================================
2828 Receiver routines
2829 =======================================================================*/
2830
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002831int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00002832EXPORT_SYMBOL(netdev_max_backlog);
2833
Eric Dumazet3b098e22010-05-15 23:57:10 -07002834int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002835int netdev_budget __read_mostly = 300;
2836int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07002838/* Called with irq disabled */
2839static inline void ____napi_schedule(struct softnet_data *sd,
2840 struct napi_struct *napi)
2841{
2842 list_add_tail(&napi->poll_list, &sd->poll_list);
2843 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2844}
2845
Eric Dumazetdf334542010-03-24 19:13:54 +00002846#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07002847
2848/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002849struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07002850EXPORT_SYMBOL(rps_sock_flow_table);
2851
Ingo Molnarc5905af2012-02-24 08:31:31 +01002852struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00002853
Ben Hutchingsc4454772011-01-19 11:03:53 +00002854static struct rps_dev_flow *
2855set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2856 struct rps_dev_flow *rflow, u16 next_cpu)
2857{
Ben Hutchings09994d12011-10-03 04:42:46 +00002858 if (next_cpu != RPS_NO_CPU) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00002859#ifdef CONFIG_RFS_ACCEL
2860 struct netdev_rx_queue *rxqueue;
2861 struct rps_dev_flow_table *flow_table;
2862 struct rps_dev_flow *old_rflow;
2863 u32 flow_id;
2864 u16 rxq_index;
2865 int rc;
2866
2867 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00002868 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2869 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00002870 goto out;
2871 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2872 if (rxq_index == skb_get_rx_queue(skb))
2873 goto out;
2874
2875 rxqueue = dev->_rx + rxq_index;
2876 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2877 if (!flow_table)
2878 goto out;
2879 flow_id = skb->rxhash & flow_table->mask;
2880 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2881 rxq_index, flow_id);
2882 if (rc < 0)
2883 goto out;
2884 old_rflow = rflow;
2885 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00002886 rflow->filter = rc;
2887 if (old_rflow->filter == rflow->filter)
2888 old_rflow->filter = RPS_NO_FILTER;
2889 out:
2890#endif
2891 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00002892 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002893 }
2894
Ben Hutchings09994d12011-10-03 04:42:46 +00002895 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002896 return rflow;
2897}
2898
Tom Herbert0a9627f2010-03-16 08:03:29 +00002899/*
2900 * get_rps_cpu is called from netif_receive_skb and returns the target
2901 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002902 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00002903 */
Tom Herbertfec5e652010-04-16 16:01:27 -07002904static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2905 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00002906{
Tom Herbert0a9627f2010-03-16 08:03:29 +00002907 struct netdev_rx_queue *rxqueue;
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002908 struct rps_map *map;
Tom Herbertfec5e652010-04-16 16:01:27 -07002909 struct rps_dev_flow_table *flow_table;
2910 struct rps_sock_flow_table *sock_flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002911 int cpu = -1;
Tom Herbertfec5e652010-04-16 16:01:27 -07002912 u16 tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002913
Tom Herbert0a9627f2010-03-16 08:03:29 +00002914 if (skb_rx_queue_recorded(skb)) {
2915 u16 index = skb_get_rx_queue(skb);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002916 if (unlikely(index >= dev->real_num_rx_queues)) {
2917 WARN_ONCE(dev->real_num_rx_queues > 1,
2918 "%s received packet on queue %u, but number "
2919 "of RX queues is %u\n",
2920 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00002921 goto done;
2922 }
2923 rxqueue = dev->_rx + index;
2924 } else
2925 rxqueue = dev->_rx;
2926
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002927 map = rcu_dereference(rxqueue->rps_map);
2928 if (map) {
Tom Herbert85875232011-01-31 16:23:42 -08002929 if (map->len == 1 &&
Eric Dumazet33d480c2011-08-11 19:30:52 +00002930 !rcu_access_pointer(rxqueue->rps_flow_table)) {
Changli Gao6febfca2010-09-03 23:12:37 +00002931 tcpu = map->cpus[0];
2932 if (cpu_online(tcpu))
2933 cpu = tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002934 goto done;
Eric Dumazetb249dcb2010-04-19 21:56:38 +00002935 }
Eric Dumazet33d480c2011-08-11 19:30:52 +00002936 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00002937 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002938 }
2939
Changli Gao2d47b452010-08-17 19:00:56 +00002940 skb_reset_network_header(skb);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002941 if (!skb_get_rxhash(skb))
Tom Herbert0a9627f2010-03-16 08:03:29 +00002942 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002943
Tom Herbertfec5e652010-04-16 16:01:27 -07002944 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2945 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2946 if (flow_table && sock_flow_table) {
2947 u16 next_cpu;
2948 struct rps_dev_flow *rflow;
2949
2950 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2951 tcpu = rflow->cpu;
2952
2953 next_cpu = sock_flow_table->ents[skb->rxhash &
2954 sock_flow_table->mask];
2955
2956 /*
2957 * If the desired CPU (where last recvmsg was done) is
2958 * different from current CPU (one in the rx-queue flow
2959 * table entry), switch if one of the following holds:
2960 * - Current CPU is unset (equal to RPS_NO_CPU).
2961 * - Current CPU is offline.
2962 * - The current CPU's queue tail has advanced beyond the
2963 * last packet that was enqueued using this table entry.
2964 * This guarantees that all previous packets for the flow
2965 * have been dequeued, thus preserving in order delivery.
2966 */
2967 if (unlikely(tcpu != next_cpu) &&
2968 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2969 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00002970 rflow->last_qtail)) >= 0)) {
2971 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002972 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00002973 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00002974
Tom Herbertfec5e652010-04-16 16:01:27 -07002975 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2976 *rflowp = rflow;
2977 cpu = tcpu;
2978 goto done;
2979 }
2980 }
2981
Tom Herbert0a9627f2010-03-16 08:03:29 +00002982 if (map) {
Tom Herbertfec5e652010-04-16 16:01:27 -07002983 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
Tom Herbert0a9627f2010-03-16 08:03:29 +00002984
2985 if (cpu_online(tcpu)) {
2986 cpu = tcpu;
2987 goto done;
2988 }
2989 }
2990
2991done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00002992 return cpu;
2993}
2994
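/*
 * Note on the map lookup in get_rps_cpu() above: the expression
 * ((u64)skb->rxhash * map->len) >> 32 maps a 32-bit hash uniformly
 * onto [0, map->len) without a division. For example, with
 * map->len = 4, hashes 0x00000000-0x3fffffff select cpus[0],
 * 0x40000000-0x7fffffff select cpus[1], and so on.
 */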
Ben Hutchingsc4454772011-01-19 11:03:53 +00002995#ifdef CONFIG_RFS_ACCEL
2996
2997/**
2998 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2999 * @dev: Device on which the filter was set
3000 * @rxq_index: RX queue index
3001 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3002 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3003 *
3004 * Drivers that implement ndo_rx_flow_steer() should periodically call
3005 * this function for each installed filter and remove the filters for
3006 * which it returns %true.
3007 */
3008bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3009 u32 flow_id, u16 filter_id)
3010{
3011 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3012 struct rps_dev_flow_table *flow_table;
3013 struct rps_dev_flow *rflow;
3014 bool expire = true;
3015 int cpu;
3016
3017 rcu_read_lock();
3018 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3019 if (flow_table && flow_id <= flow_table->mask) {
3020 rflow = &flow_table->flows[flow_id];
3021 cpu = ACCESS_ONCE(rflow->cpu);
3022 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3023 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3024 rflow->last_qtail) <
3025 (int)(10 * flow_table->mask)))
3026 expire = false;
3027 }
3028 rcu_read_unlock();
3029 return expire;
3030}
3031EXPORT_SYMBOL(rps_may_expire_flow);
3032
3033#endif /* CONFIG_RFS_ACCEL */
3034
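/*
 * Driver-side sketch for rps_may_expire_flow() above (hypothetical;
 * the my_* and f->* names are made up): a driver implementing
 * ndo_rx_flow_steer() would periodically walk its installed filters,
 * e.g. from a work item, and tear down those the stack no longer
 * needs:
 *
 *	list_for_each_entry_safe(f, tmp, &my_dev->steer_filters, list) {
 *		if (rps_may_expire_flow(netdev, f->rxq_index,
 *					f->flow_id, f->filter_id)) {
 *			my_remove_hw_filter(my_dev, f);
 *			list_del(&f->list);
 *			kfree(f);
 *		}
 *	}
 */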
Tom Herbert0a9627f2010-03-16 08:03:29 +00003035/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003036static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003037{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003038 struct softnet_data *sd = data;
3039
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003040 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003041 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003042}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003043
Tom Herbertfec5e652010-04-16 16:01:27 -07003044#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003045
3046/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003047 * Check if this softnet_data structure belongs to another CPU.
3048 * If yes, queue it to our IPI list and return 1.
3049 * If no, return 0.
3050 */
3051static int rps_ipi_queued(struct softnet_data *sd)
3052{
3053#ifdef CONFIG_RPS
3054 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3055
3056 if (sd != mysd) {
3057 sd->rps_ipi_next = mysd->rps_ipi_list;
3058 mysd->rps_ipi_list = sd;
3059
3060 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3061 return 1;
3062 }
3063#endif /* CONFIG_RPS */
3064 return 0;
3065}
3066
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003067#ifdef CONFIG_NET_FLOW_LIMIT
3068int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3069#endif
3070
3071static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3072{
3073#ifdef CONFIG_NET_FLOW_LIMIT
3074 struct sd_flow_limit *fl;
3075 struct softnet_data *sd;
3076 unsigned int old_flow, new_flow;
3077
3078 if (qlen < (netdev_max_backlog >> 1))
3079 return false;
3080
3081 sd = &__get_cpu_var(softnet_data);
3082
3083 rcu_read_lock();
3084 fl = rcu_dereference(sd->flow_limit);
3085 if (fl) {
3086 new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
3087 old_flow = fl->history[fl->history_head];
3088 fl->history[fl->history_head] = new_flow;
3089
3090 fl->history_head++;
3091 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3092
3093 if (likely(fl->buckets[old_flow]))
3094 fl->buckets[old_flow]--;
3095
3096 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3097 fl->count++;
3098 rcu_read_unlock();
3099 return true;
3100 }
3101 }
3102 rcu_read_unlock();
3103#endif
3104 return false;
3105}
3106
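/*
 * Worked example for skb_flow_limit() above (assuming a
 * FLOW_LIMIT_HISTORY of 128): once the backlog is more than half of
 * netdev_max_backlog, each arriving skb is hashed into a bucket and a
 * sliding window over the last 128 enqueued packets is maintained. If
 * a single bucket ever accounts for more than half the window (more
 * than 64 of the last 128 packets), that flow is deemed abusive and
 * its packet is dropped, leaving backlog room for the other flows.
 */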
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003107/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003108 * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
3109 * queue (which may be a remote CPU's queue).
3110 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003111static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3112 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003113{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003114 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003115 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003116 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003117
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003118 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003119
3120 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003121
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003122 rps_lock(sd);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003123 qlen = skb_queue_len(&sd->input_pkt_queue);
3124 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Changli Gao6e7676c2010-04-27 15:07:33 -07003125 if (skb_queue_len(&sd->input_pkt_queue)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003126enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003127 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003128 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003129 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003130 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003131 return NET_RX_SUCCESS;
3132 }
3133
Eric Dumazetebda37c22010-05-06 23:51:21 +00003134		/* Schedule NAPI for the backlog device.
3135		 * We can use a non-atomic operation since we own the queue lock.
3136 */
3137 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003138 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003139 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003140 }
3141 goto enqueue;
3142 }
3143
Changli Gaodee42872010-05-02 05:42:16 +00003144 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003145 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003146
Tom Herbert0a9627f2010-03-16 08:03:29 +00003147 local_irq_restore(flags);
3148
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003149 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003150 kfree_skb(skb);
3151 return NET_RX_DROP;
3152}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154/**
3155 * netif_rx - post buffer to the network code
3156 * @skb: buffer to post
3157 *
3158 * This function receives a packet from a device driver and queues it for
3159 * the upper (protocol) levels to process. It always succeeds. The buffer
3160 * may be dropped during processing for congestion control or by the
3161 * protocol layers.
3162 *
3163 * return values:
3164 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 * NET_RX_DROP (packet was dropped)
3166 *
3167 */
3168
3169int netif_rx(struct sk_buff *skb)
3170{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003171 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172
3173 /* if netpoll wants it, pretend we never saw it */
3174 if (netpoll_rx(skb))
3175 return NET_RX_DROP;
3176
Eric Dumazet588f0332011-11-15 04:12:55 +00003177 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003178
Koki Sanagicf66ba52010-08-23 18:45:02 +09003179 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003180#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003181 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003182 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003183 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184
Changli Gaocece1942010-08-07 20:35:43 -07003185 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003186 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003187
3188 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003189 if (cpu < 0)
3190 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003191
3192 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3193
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003194 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003195 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003196 } else
3197#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003198 {
3199 unsigned int qtail;
3200 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3201 put_cpu();
3202 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003203 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003205EXPORT_SYMBOL(netif_rx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206
3207int netif_rx_ni(struct sk_buff *skb)
3208{
3209 int err;
3210
3211 preempt_disable();
3212 err = netif_rx(skb);
3213 if (local_softirq_pending())
3214 do_softirq();
3215 preempt_enable();
3216
3217 return err;
3218}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219EXPORT_SYMBOL(netif_rx_ni);
3220
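/*
 * Receive sketch (hypothetical non-NAPI driver; the foo_* names are
 * made up): a driver typically calls netif_rx() from its interrupt
 * handler, or netif_rx_ni() from process context:
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (skb) {
 *		memcpy(skb_put(skb, pkt_len), foo_rx_buffer(priv),
 *		       pkt_len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */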
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221static void net_tx_action(struct softirq_action *h)
3222{
3223 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3224
3225 if (sd->completion_queue) {
3226 struct sk_buff *clist;
3227
3228 local_irq_disable();
3229 clist = sd->completion_queue;
3230 sd->completion_queue = NULL;
3231 local_irq_enable();
3232
3233 while (clist) {
3234 struct sk_buff *skb = clist;
3235 clist = clist->next;
3236
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003237 WARN_ON(atomic_read(&skb->users));
Koki Sanagi07dc22e2010-08-23 18:46:12 +09003238 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 __kfree_skb(skb);
3240 }
3241 }
3242
3243 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003244 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245
3246 local_irq_disable();
3247 head = sd->output_queue;
3248 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003249 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250 local_irq_enable();
3251
3252 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003253 struct Qdisc *q = head;
3254 spinlock_t *root_lock;
3255
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256 head = head->next_sched;
3257
David S. Miller5fb66222008-08-02 20:02:43 -07003258 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003259 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003260 smp_mb__before_clear_bit();
3261 clear_bit(__QDISC_STATE_SCHED,
3262 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003263 qdisc_run(q);
3264 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003266 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003267 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003268 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003269 } else {
3270 smp_mb__before_clear_bit();
3271 clear_bit(__QDISC_STATE_SCHED,
3272 &q->state);
3273 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274 }
3275 }
3276 }
3277}
3278
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003279#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3280 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003281/* This hook is defined here for ATM LANE */
3282int (*br_fdb_test_addr_hook)(struct net_device *dev,
3283 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003284EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003285#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287#ifdef CONFIG_NET_CLS_ACT
3288/* TODO: Maybe we should just force sch_ingress to be compiled in
3289 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
3290 * instructions (a compare and two extra stores) when it is not on
3291 * but CONFIG_NET_CLS_ACT is.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003292 * NOTE: This doesn't stop any functionality; if you don't have
3293 * the ingress scheduler, you just can't add policies on ingress.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003294 *
3295 */
Eric Dumazet24824a02010-10-02 06:11:55 +00003296static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003298 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003299 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07003300 int result = TC_ACT_OK;
3301 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003302
Stephen Hemmingerde384832010-08-01 00:33:23 -07003303 if (unlikely(MAX_RED_LOOP < ttl++)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003304 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3305 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07003306 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307 }
3308
Herbert Xuf697c3e2007-10-14 00:38:47 -07003309 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3310 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3311
David S. Miller83874002008-07-17 00:53:03 -07003312 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07003313 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07003314 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07003315 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3316 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07003317 spin_unlock(qdisc_lock(q));
3318 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07003319
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320 return result;
3321}
Herbert Xuf697c3e2007-10-14 00:38:47 -07003322
3323static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3324 struct packet_type **pt_prev,
3325 int *ret, struct net_device *orig_dev)
3326{
Eric Dumazet24824a02010-10-02 06:11:55 +00003327 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3328
3329 if (!rxq || rxq->qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003330 goto out;
3331
3332 if (*pt_prev) {
3333 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3334 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003335 }
3336
Eric Dumazet24824a02010-10-02 06:11:55 +00003337 switch (ing_filter(skb, rxq)) {
Herbert Xuf697c3e2007-10-14 00:38:47 -07003338 case TC_ACT_SHOT:
3339 case TC_ACT_STOLEN:
3340 kfree_skb(skb);
3341 return NULL;
3342 }
3343
3344out:
3345 skb->tc_verd = 0;
3346 return skb;
3347}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348#endif
3349
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003350/**
3351 * netdev_rx_handler_register - register receive handler
3352 * @dev: device to register a handler for
3353 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003354 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003355 *
3356 * Register a receive handler for a device. This handler will then be
3357 * called from __netif_receive_skb. A negative errno code is returned
3358 * on a failure.
3359 *
3360 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003361 *
3362 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003363 */
3364int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003365 rx_handler_func_t *rx_handler,
3366 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003367{
3368 ASSERT_RTNL();
3369
3370 if (dev->rx_handler)
3371 return -EBUSY;
3372
Eric Dumazet00cfec32013-03-29 03:01:22 +00003373 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00003374 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003375 rcu_assign_pointer(dev->rx_handler, rx_handler);
3376
3377 return 0;
3378}
3379EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3380
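/*
 * Registration sketch (hypothetical upper device in the style of
 * bridge/macvlan users; the my_* names are made up). Under rtnl_lock:
 *
 *	err = netdev_rx_handler_register(port_dev, my_handle_frame,
 *					 my_port);
 *	if (err)
 *		return err;
 *
 * where my_handle_frame() is an rx_handler_func_t returning one of the
 * RX_HANDLER_* results dispatched by __netif_receive_skb_core() below.
 */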
3381/**
3382 * netdev_rx_handler_unregister - unregister receive handler
3383 * @dev: device to unregister a handler from
3384 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00003385 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003386 *
3387 * The caller must hold the rtnl_mutex.
3388 */
3389void netdev_rx_handler_unregister(struct net_device *dev)
3390{
3391
3392 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003393 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00003394	/* A reader seeing a non-NULL rx_handler inside an rcu_read_lock()
3395	 * section is guaranteed to see a non-NULL rx_handler_data
3396	 * as well.
3397 */
3398 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003399 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003400}
3401EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3402
Mel Gormanb4b9e352012-07-31 16:44:26 -07003403/*
3404 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3405 * the special handling of PFMEMALLOC skbs.
3406 */
3407static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3408{
3409 switch (skb->protocol) {
3410 case __constant_htons(ETH_P_ARP):
3411 case __constant_htons(ETH_P_IP):
3412 case __constant_htons(ETH_P_IPV6):
3413 case __constant_htons(ETH_P_8021Q):
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003414 case __constant_htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07003415 return true;
3416 default:
3417 return false;
3418 }
3419}
3420
David S. Miller9754e292013-02-14 15:57:38 -05003421static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422{
3423 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003424 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003425 struct net_device *orig_dev;
David S. Miller63d8ea72011-02-28 10:48:59 -08003426 struct net_device *null_or_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003427 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08003429 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003430
Eric Dumazet588f0332011-11-15 04:12:55 +00003431 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003432
Koki Sanagicf66ba52010-08-23 18:45:02 +09003433 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003434
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003436 if (netpoll_receive_skb(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003437 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003439 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003440
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003441 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003442 if (!skb_transport_header_was_set(skb))
3443 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003444 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445
3446 pt_prev = NULL;
3447
3448 rcu_read_lock();
3449
David S. Miller63d8ea72011-02-28 10:48:59 -08003450another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003451 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003452
3453 __this_cpu_inc(softnet_data.processed);
3454
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003455 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3456 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003457 skb = vlan_untag(skb);
3458 if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003459 goto unlock;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003460 }
3461
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462#ifdef CONFIG_NET_CLS_ACT
3463 if (skb->tc_verd & TC_NCLS) {
3464 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3465 goto ncls;
3466 }
3467#endif
3468
David S. Miller9754e292013-02-14 15:57:38 -05003469 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003470 goto skip_taps;
3471
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472 list_for_each_entry_rcu(ptype, &ptype_all, list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003473 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003474 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003475 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476 pt_prev = ptype;
3477 }
3478 }
3479
Mel Gormanb4b9e352012-07-31 16:44:26 -07003480skip_taps:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07003482 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3483 if (!skb)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003484 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485ncls:
3486#endif
3487
David S. Miller9754e292013-02-14 15:57:38 -05003488 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003489 goto drop;
3490
John Fastabend24257172011-10-10 09:16:41 +00003491 if (vlan_tx_tag_present(skb)) {
3492 if (pt_prev) {
3493 ret = deliver_skb(skb, pt_prev, orig_dev);
3494 pt_prev = NULL;
3495 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003496 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003497 goto another_round;
3498 else if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003499 goto unlock;
John Fastabend24257172011-10-10 09:16:41 +00003500 }
3501
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003502 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003503 if (rx_handler) {
3504 if (pt_prev) {
3505 ret = deliver_skb(skb, pt_prev, orig_dev);
3506 pt_prev = NULL;
3507 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003508 switch (rx_handler(&skb)) {
3509 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00003510 ret = NET_RX_SUCCESS;
Mel Gormanb4b9e352012-07-31 16:44:26 -07003511 goto unlock;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003512 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003513 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003514 case RX_HANDLER_EXACT:
3515 deliver_exact = true;
3516 case RX_HANDLER_PASS:
3517 break;
3518 default:
3519 BUG();
3520 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003521 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003523 if (vlan_tx_nonzero_tag_present(skb))
3524 skb->pkt_type = PACKET_OTHERHOST;
3525
David S. Miller63d8ea72011-02-28 10:48:59 -08003526 /* deliver only exact match when indicated */
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003527 null_or_dev = deliver_exact ? skb->dev : NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00003528
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003530 list_for_each_entry_rcu(ptype,
3531 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003532 if (ptype->type == type &&
Jiri Pirkoe3f48d32011-02-28 20:26:31 +00003533 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3534 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003535 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003536 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537 pt_prev = ptype;
3538 }
3539 }
3540
3541 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003542 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003543 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003544 else
3545 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003547drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003548 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003549 kfree_skb(skb);
3550		/* Jamal, now you will not be able to escape explaining
3551		 * to me how you were going to use this. :-)
3552 */
3553 ret = NET_RX_DROP;
3554 }
3555
Mel Gormanb4b9e352012-07-31 16:44:26 -07003556unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 rcu_read_unlock();
Mel Gormanb4b9e352012-07-31 16:44:26 -07003558out:
David S. Miller9754e292013-02-14 15:57:38 -05003559 return ret;
3560}
3561
3562static int __netif_receive_skb(struct sk_buff *skb)
3563{
3564 int ret;
3565
3566 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3567 unsigned long pflags = current->flags;
3568
3569 /*
3570 * PFMEMALLOC skbs are special, they should
3571 * - be delivered to SOCK_MEMALLOC sockets only
3572 * - stay away from userspace
3573 * - have bounded memory usage
3574 *
3575 * Use PF_MEMALLOC as this saves us from propagating the allocation
3576 * context down to all allocation sites.
3577 */
3578 current->flags |= PF_MEMALLOC;
3579 ret = __netif_receive_skb_core(skb, true);
3580 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3581 } else
3582 ret = __netif_receive_skb_core(skb, false);
3583
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584 return ret;
3585}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003586
3587/**
3588 * netif_receive_skb - process receive buffer from network
3589 * @skb: buffer to process
3590 *
3591 * netif_receive_skb() is the main receive data processing function.
3592 * It always succeeds. The buffer may be dropped during processing
3593 * for congestion control or by the protocol layers.
3594 *
3595 * This function may only be called from softirq context and interrupts
3596 * should be enabled.
3597 *
3598 * Return values (usually ignored):
3599 * NET_RX_SUCCESS: no congestion
3600 * NET_RX_DROP: packet was dropped
3601 */
3602int netif_receive_skb(struct sk_buff *skb)
3603{
Eric Dumazet588f0332011-11-15 04:12:55 +00003604 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003605
Richard Cochranc1f19b52010-07-17 08:49:36 +00003606 if (skb_defer_rx_timestamp(skb))
3607 return NET_RX_SUCCESS;
3608
Eric Dumazetdf334542010-03-24 19:13:54 +00003609#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003610 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07003611 struct rps_dev_flow voidflow, *rflow = &voidflow;
3612 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003613
Eric Dumazet3b098e22010-05-15 23:57:10 -07003614 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003615
Eric Dumazet3b098e22010-05-15 23:57:10 -07003616 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07003617
Eric Dumazet3b098e22010-05-15 23:57:10 -07003618 if (cpu >= 0) {
3619 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3620 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00003621 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003622 }
Eric Dumazetadc93002011-11-17 03:13:26 +00003623 rcu_read_unlock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003624 }
Tom Herbert1e94d722010-03-18 17:45:44 -07003625#endif
Eric Dumazetadc93002011-11-17 03:13:26 +00003626 return __netif_receive_skb(skb);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003627}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003628EXPORT_SYMBOL(netif_receive_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003629
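/*
 * Softirq-context sketch (hypothetical NAPI driver poll routine; all
 * foo_* names are made up):
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi,
 *						     struct foo_priv, napi);
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = foo_next_rx_skb(priv))) {
 *			skb->protocol = eth_type_trans(skb, priv->dev);
 *			netif_receive_skb(skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete(napi);
 *		return done;
 *	}
 */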
Eric Dumazet88751272010-04-19 05:07:33 +00003630/* Network device is going away, flush any packets still pending
3631 * Called with irqs disabled.
3632 */
Changli Gao152102c2010-03-30 20:16:22 +00003633static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003634{
Changli Gao152102c2010-03-30 20:16:22 +00003635 struct net_device *dev = arg;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003636 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003637 struct sk_buff *skb, *tmp;
3638
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003639 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003640 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003641 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003642 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003643 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003644 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003645 }
Changli Gao6e7676c2010-04-27 15:07:33 -07003646 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003647 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003648
3649 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3650 if (skb->dev == dev) {
3651 __skb_unlink(skb, &sd->process_queue);
3652 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003653 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003654 }
3655 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003656}
3657
Herbert Xud565b0a2008-12-15 23:38:52 -08003658static int napi_gro_complete(struct sk_buff *skb)
3659{
Vlad Yasevich22061d82012-11-15 08:49:11 +00003660 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003661 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003662 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08003663 int err = -ENOENT;
3664
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003665 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3666
Herbert Xufc59f9a2009-04-14 15:11:06 -07003667 if (NAPI_GRO_CB(skb)->count == 1) {
3668 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003669 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07003670 }
Herbert Xud565b0a2008-12-15 23:38:52 -08003671
3672 rcu_read_lock();
3673 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003674 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08003675 continue;
3676
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003677 err = ptype->callbacks.gro_complete(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003678 break;
3679 }
3680 rcu_read_unlock();
3681
3682 if (err) {
3683 WARN_ON(&ptype->list == head);
3684 kfree_skb(skb);
3685 return NET_RX_SUCCESS;
3686 }
3687
3688out:
Herbert Xud565b0a2008-12-15 23:38:52 -08003689 return netif_receive_skb(skb);
3690}
3691
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003692/* napi->gro_list contains packets ordered by age, with the
3693 * youngest packets at the head of it.
3694 * Complete skbs in reverse order (oldest first) to reduce latencies.
3695 */
3696void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08003697{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003698 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003699
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003700 /* scan list and build reverse chain */
3701 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3702 skb->prev = prev;
3703 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08003704 }
3705
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003706 for (skb = prev; skb; skb = prev) {
3707 skb->next = NULL;
3708
3709 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3710 return;
3711
3712 prev = skb->prev;
3713 napi_gro_complete(skb);
3714 napi->gro_count--;
3715 }
3716
Herbert Xud565b0a2008-12-15 23:38:52 -08003717 napi->gro_list = NULL;
3718}
Eric Dumazet86cac582010-08-31 18:25:32 +00003719EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08003720
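/*
 * Usage note for napi_gro_flush() above (an assumption about typical
 * callers, not taken verbatim from this file): drivers feed packets in
 * via napi_gro_receive(), and the GRO list is flushed when polling
 * completes, napi_complete() calling napi_gro_flush(napi, false).
 * Passing flush_old == true instead leaves skbs aged in the current
 * jiffy on the list, in the hope that more segments arrive and merge
 * into them.
 */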
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003721static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3722{
3723 struct sk_buff *p;
3724 unsigned int maclen = skb->dev->hard_header_len;
3725
3726 for (p = napi->gro_list; p; p = p->next) {
3727 unsigned long diffs;
3728
3729 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3730 diffs |= p->vlan_tci ^ skb->vlan_tci;
3731 if (maclen == ETH_HLEN)
3732 diffs |= compare_ether_header(skb_mac_header(p),
3733 skb_gro_mac_header(skb));
3734 else if (!diffs)
3735 diffs = memcmp(skb_mac_header(p),
3736 skb_gro_mac_header(skb),
3737 maclen);
3738 NAPI_GRO_CB(p)->same_flow = !diffs;
3739 NAPI_GRO_CB(p)->flush = 0;
3740 }
3741}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;

	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb))
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);

		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
			skb_frag_unref(skb, 0);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}


static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			kmem_cache_free(skbuff_head_cache, skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}

static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb->mac_header == skb->tail &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
	}
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
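
/*
 * Illustrative sketch (not part of the original file): how a NAPI driver's
 * poll routine typically hands received frames to GRO. The mydrv_* device,
 * ring accessors and private structures below are hypothetical.
 */
#if 0
static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_ring *ring = container_of(napi, struct mydrv_ring, napi);
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = mydrv_next_rx_skb(ring);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, ring->netdev);
		/* GRO either merges the skb into gro_list or passes it up */
		napi_gro_receive(napi, skb);
		done++;
	}
	if (done < budget)
		napi_complete(napi);
	return done;
}
#endif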

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (ret == GRO_HELD)
			skb_gro_pull(skb, -ETH_HLEN);
		else if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}

static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;
	unsigned int hlen;
	unsigned int off;

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*eth);
	eth = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		eth = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			skb = NULL;
			goto out;
		}
	}

	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

out:
	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
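
/*
 * Illustrative sketch (not part of the original file): the napi_get_frags()/
 * napi_gro_frags() pattern used by drivers that receive directly into pages
 * rather than into a linear skb. mydrv_rx_page() and the ring argument are
 * hypothetical placeholders.
 */
#if 0
static int mydrv_rx_one(struct napi_struct *napi, struct mydrv_ring *ring)
{
	struct sk_buff *skb = napi_get_frags(napi);
	struct page *page;
	unsigned int len;

	if (!skb)
		return -ENOMEM;

	page = mydrv_rx_page(ring, &len);
	skb_fill_page_desc(skb, 0, page, 0, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;

	/* GRO parses the Ethernet header itself in napi_frags_skb() */
	napi_gro_frags(napi);
	return 0;
}
#endif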

/*
 * net_rps_action sends any pending IPIs for RPS.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPIs to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				__smp_call_function_single(remsd->cpu,
							   &remsd->csd, 0);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

#ifdef CONFIG_RPS
	/* Check if we have pending IPIs; it's better to send them now
	 * than to wait for net_rx_action() to end.
	 */
	if (sd->rps_ipi_list) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}
#endif
	napi->weight = weight_p;
	local_irq_disable();
	while (work < quota) {
		struct sk_buff *skb;
		unsigned int qlen;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		qlen = skb_queue_len(&sd->input_pkt_queue);
		if (qlen)
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);

		if (qlen < quota - work) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog, so we can use a plain write instead of
			 * clear_bit(), and we don't need an smp_mb() memory
			 * barrier.
			 */
			list_del(&napi->poll_list);
			napi->state = 0;

			quota = work + qlen;
		}
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(&__get_cpu_var(softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
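
/*
 * Illustrative sketch (not part of the original file): a typical interrupt
 * handler deferring receive work to NAPI. napi_schedule_prep() tests
 * NAPI_STATE_SCHED so that __napi_schedule() is only called once per
 * scheduling cycle. The mydrv_* device and its IRQ masking are hypothetical.
 */
#if 0
static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		mydrv_disable_rx_irq(priv);	/* quiesce until poll finishes */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
#endif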

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n, false);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
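
/*
 * Illustrative sketch (not part of the original file): registering a NAPI
 * context at probe time and tearing it down on remove, using the recommended
 * NAPI_POLL_WEIGHT. The mydrv_* names (including the mydrv_poll sketched
 * earlier) are hypothetical.
 */
#if 0
static int mydrv_probe_napi(struct net_device *netdev, struct mydrv_priv *priv)
{
	netif_napi_add(netdev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);
	return 0;
}

static void mydrv_remove_napi(struct mydrv_priv *priv)
{
	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
}
#endif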

void netif_napi_del(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		kfree_skb(skb);
	}

	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&sd->poll_list)) {
		struct napi_struct *n;
		int work, weight;

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else {
				if (n->gro_list) {
					/* flush too old packets
					 * If HZ < 1000, flush all packets.
					 */
					local_irq_enable();
					napi_gro_flush(n, HZ >= 1000);
					local_irq_disable();
				}
				list_move_tail(&n->poll_list, &sd->poll_list);
			}
		}

		netpoll_poll_unlock(have);
	}
out:
	net_rps_action_and_irq_enable(sd);

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	sd->time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

struct netdev_upper {
	struct net_device *dev;
	bool master;
	struct list_head list;
	struct rcu_head rcu;
	struct list_head search_list;
};

static void __append_search_uppers(struct list_head *search_list,
				   struct net_device *dev)
{
	struct netdev_upper *upper;

	list_for_each_entry(upper, &dev->upper_dev_list, list) {
		/* check if this upper is not already in search list */
		if (list_empty(&upper->search_list))
			list_add_tail(&upper->search_list, search_list);
	}
}

static bool __netdev_search_upper_dev(struct net_device *dev,
				      struct net_device *upper_dev)
{
	LIST_HEAD(search_list);
	struct netdev_upper *upper;
	struct netdev_upper *tmp;
	bool ret = false;

	__append_search_uppers(&search_list, dev);
	list_for_each_entry(upper, &search_list, search_list) {
		if (upper->dev == upper_dev) {
			ret = true;
			break;
		}
		__append_search_uppers(&search_list, upper->dev);
	}
	list_for_each_entry_safe(upper, tmp, &search_list, search_list)
		INIT_LIST_HEAD(&upper->search_list);
	return ret;
}

static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
						struct net_device *upper_dev)
{
	struct netdev_upper *upper;

	list_for_each_entry(upper, &dev->upper_dev_list, list) {
		if (upper->dev == upper_dev)
			return upper;
	}
	return NULL;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks only immediate upper devices,
 * not a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return __netdev_find_upper(dev, upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
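
/*
 * Illustrative sketch (not part of the original file): querying the upper
 * device topology from code that already holds RTNL, e.g. a netdevice
 * notifier. The "eth0"/"bond0" names and the enclosing function are
 * hypothetical.
 */
#if 0
static void example_check_upper(struct net *net)
{
	struct net_device *lower, *upper;

	ASSERT_RTNL();
	lower = __dev_get_by_name(net, "eth0");
	upper = __dev_get_by_name(net, "bond0");
	if (lower && upper && netdev_has_upper_dev(lower, upper))
		pr_info("%s is enslaved to %s\n", lower->name, upper->name);
}
#endif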

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->upper_dev_list);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_upper *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->upper_dev_list))
		return NULL;

	upper = list_first_entry(&dev->upper_dev_list,
				 struct netdev_upper, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_upper *upper;

	upper = list_first_or_null_rcu(&dev->upper_dev_list,
				       struct netdev_upper, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master)
{
	struct netdev_upper *upper;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (__netdev_search_upper_dev(upper_dev, dev))
		return -EBUSY;

	if (__netdev_find_upper(dev, upper_dev))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	upper = kmalloc(sizeof(*upper), GFP_KERNEL);
	if (!upper)
		return -ENOMEM;

	upper->dev = upper_dev;
	upper->master = master;
	INIT_LIST_HEAD(&upper->search_list);

	/* Ensure that master upper link is always the first item in list. */
	if (master)
		list_add_rcu(&upper->list, &dev->upper_dev_list);
	else
		list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
	dev_hold(upper_dev);

	return 0;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, true);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
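
/*
 * Illustrative sketch (not part of the original file): how a bonding-style
 * master driver might wire up its topology when enslaving a port. The
 * example_enslave() wrapper and its error handling are hypothetical.
 */
#if 0
static int example_enslave(struct net_device *master_dev,
			   struct net_device *slave_dev)
{
	int err;

	ASSERT_RTNL();

	err = netdev_master_upper_dev_link(slave_dev, master_dev);
	if (err)
		return err;	/* -EBUSY on loops or a second master */

	/* ... program hardware, copy MAC/MTU, etc. ... */
	return 0;
}
#endif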

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_upper *upper;

	ASSERT_RTNL();

	upper = __netdev_find_upper(dev, upper_dev);
	if (!upper)
		return;
	list_del_rcu(&upper->list);
	dev_put(upper_dev);
	kfree_rcu(upper, rcu);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
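
/*
 * Illustrative sketch (not part of the original file): because the promiscuity
 * count is a reference count, every +1 must eventually be paired with a -1.
 * The example_sniff_enable() wrapper is hypothetical.
 */
#if 0
static int example_sniff_enable(struct net_device *dev, bool on)
{
	int err;

	ASSERT_RTNL();

	err = dev_set_promiscuity(dev, on ? 1 : -1);
	if (err)
		return err;	/* e.g. -EOVERFLOW if the counter would wrap */
	return 0;
}
#endif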

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
	}
	return 0;
}
EXPORT_SYMBOL(dev_set_allmulti);
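
/*
 * Illustrative sketch (not part of the original file): a component that needs
 * every multicast frame while active takes the allmulti count on start and
 * releases it on stop. The example_mcast_monitor() name is hypothetical.
 */
#if 0
static int example_mcast_monitor(struct net_device *dev, bool start)
{
	ASSERT_RTNL();

	/* +1 on start must be balanced by -1 on stop */
	return dev_set_allmulti(dev, start ? 1 : -1);
}
#endif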

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags & IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast address changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);

int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 * Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 * Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface?  We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different? */
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

		if (!ret)
			dev_set_rx_mode(dev);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;

		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important.  Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, without asking us and without reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 *
 * Change settings on a device based on the given state flags. The flags
 * are in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	unsigned int changes, old_flags = dev->flags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = old_flags ^ dev->flags;
	if (changes)
		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);

	__dev_notify_flags(dev, old_flags);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
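
/*
 * Illustrative sketch (not part of the original file): bringing an interface
 * administratively up from kernel code, the rough equivalent of
 * "ip link set eth0 up". The enclosing function is hypothetical.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Preserve all current flags and add IFF_UP */
	return dev_change_flags(dev, dev->flags | IFF_UP);
}
#endif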

/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (ops->ndo_change_mtu)
		err = ops->ndo_change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;

	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
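
/*
 * Illustrative sketch (not part of the original file): switching a device to
 * a jumbo MTU and falling back on failure. The 9000-byte value and the
 * enclosing function are hypothetical.
 */
#if 0
static int example_enable_jumbo(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();

	err = dev_set_mtu(dev, 9000);
	if (err)	/* e.g. the driver's ndo_change_mtu rejected the size */
		err = dev_set_mtu(dev, 1500);
	return err;
}
#endif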

/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
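
/*
 * Illustrative sketch (not part of the original file): setting a MAC address
 * from kernel code. Note that sa_family must carry the device type
 * (ARPHRD_ETHER for Ethernet), not an address family. The address bytes are
 * hypothetical.
 */
#if 0
static int example_set_mac(struct net_device *dev)
{
	struct sockaddr sa;
	static const u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

	sa.sa_family = dev->type;	/* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, addr, ETH_ALEN);
	return dev_set_mac_address(dev, &sa);
}
#endif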

/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
}

static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		/* Notify protocols that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

		/*
		 * Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		/* Notifier chain MUST detach us from all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}

static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* UFO needs SG and checksumming */
	if (features & NETIF_F_UFO) {
		/* maybe split UFO into V4 and V6? */
		if (!((features & NETIF_F_GEN_CSUM) ||
		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
			netdev_dbg(dev,
				   "Dropping NETIF_F_UFO since no checksum offload features.\n");
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			netdev_dbg(dev,
				   "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
			features &= ~NETIF_F_UFO;
		}
	}

	return features;
}

int __netdev_update_features(struct net_device *dev)
{
	netdev_features_t features;
	int err = 0;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	if (dev->features == features)
		return 0;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		return -1;
	}

	if (!err)
		dev->features = features;

	return 1;
}

/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate the dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
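
/*
 * Illustrative sketch (not part of the original file): a driver that loses an
 * offload capability at runtime adjusts dev->hw_features and asks the core to
 * recompute the effective set. The mydrv_* name and scenario are hypothetical.
 */
#if 0
static void mydrv_firmware_lost_tso(struct net_device *dev)
{
	ASSERT_RTNL();

	/* TSO is no longer available; netdev_fix_features() will also
	 * drop anything that depended on it.
	 */
	dev->hw_features &= ~NETIF_F_TSO;
	netdev_update_features(dev);
}
#endif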

/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 *	netif_stacked_transfer_operstate - transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
					struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
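
/*
 * Example (illustrative sketch): a stacked driver mirroring carrier
 * changes of its lower device from a netdevice notifier.  The lookup
 * helper foo_find_upper() is hypothetical and the notifier wiring is
 * abbreviated; only the operstate transfer is the point here.
 *
 *	static int foo_device_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *lower = ptr;	// simplified for this sketch
 *		struct net_device *upper = foo_find_upper(lower);
 *
 *		if (upper && event == NETDEV_CHANGE)
 *			netif_stacked_transfer_operstate(lower, upper);
 *		return NOTIFY_DONE;
 *	}
 */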
5133
Tom Herbertbf264142010-11-26 08:36:09 +00005134#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005135static int netif_alloc_rx_queues(struct net_device *dev)
5136{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005137 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00005138 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005139
Tom Herbertbd25fa72010-10-18 18:00:16 +00005140 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005141
Tom Herbertbd25fa72010-10-18 18:00:16 +00005142 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005143 if (!rx)
Tom Herbertbd25fa72010-10-18 18:00:16 +00005144 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005145
Tom Herbertbd25fa72010-10-18 18:00:16 +00005146 dev->_rx = rx;
5147
Tom Herbertbd25fa72010-10-18 18:00:16 +00005148 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00005149 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005150 return 0;
5151}
Tom Herbertbf264142010-11-26 08:36:09 +00005152#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005153
Changli Gaoaa942102010-12-04 02:31:41 +00005154static void netdev_init_one_queue(struct net_device *dev,
5155 struct netdev_queue *queue, void *_unused)
5156{
5157 /* Initialize queue lock */
5158 spin_lock_init(&queue->_xmit_lock);
5159 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5160 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00005161 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00005162 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00005163#ifdef CONFIG_BQL
5164 dql_init(&queue->dql, HZ);
5165#endif
Changli Gaoaa942102010-12-04 02:31:41 +00005166}
5167
Tom Herberte6484932010-10-18 18:04:39 +00005168static int netif_alloc_netdev_queues(struct net_device *dev)
5169{
5170 unsigned int count = dev->num_tx_queues;
5171 struct netdev_queue *tx;
5172
5173 BUG_ON(count < 1);
5174
5175 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005176 if (!tx)
Tom Herberte6484932010-10-18 18:04:39 +00005177 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005178
Tom Herberte6484932010-10-18 18:04:39 +00005179 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00005180
Tom Herberte6484932010-10-18 18:04:39 +00005181 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5182 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00005183
5184 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00005185}
5186
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005187/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005188 * register_netdevice - register a network device
5189 * @dev: device to register
5190 *
5191 * Take a completed network device structure and add it to the kernel
5192 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5193 * chain. 0 is returned on success. A negative errno code is returned
5194 * on a failure to set up the device, or if the name is a duplicate.
5195 *
5196 * Callers must hold the rtnl semaphore. You may want
5197 * register_netdev() instead of this.
5198 *
5199 * BUGS:
5200 * The locking appears insufficient to guarantee two parallel registers
5201 * will not get the same name.
5202 */
5203
5204int register_netdevice(struct net_device *dev)
5205{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005206 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005207 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208
5209 BUG_ON(dev_boot_phase);
5210 ASSERT_RTNL();
5211
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005212 might_sleep();
5213
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214 /* When net_device's are persistent, this will be fatal. */
5215 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005216 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005217
David S. Millerf1f28aa2008-07-15 00:08:33 -07005218 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07005219 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005220
Linus Torvalds1da177e2005-04-16 15:20:36 -07005221 dev->iflink = -1;
5222
Gao feng828de4f2012-09-13 20:58:27 +00005223 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00005224 if (ret < 0)
5225 goto out;
5226
Linus Torvalds1da177e2005-04-16 15:20:36 -07005227 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005228 if (dev->netdev_ops->ndo_init) {
5229 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230 if (ret) {
5231 if (ret > 0)
5232 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08005233 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234 }
5235 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005236
Patrick McHardyf6469682013-04-19 02:04:27 +00005237 if (((dev->hw_features | dev->features) &
5238 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00005239 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5240 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5241 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5242 ret = -EINVAL;
5243 goto err_uninit;
5244 }
5245
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00005246 ret = -EBUSY;
5247 if (!dev->ifindex)
5248 dev->ifindex = dev_new_index(net);
5249 else if (__dev_get_by_index(net, dev->ifindex))
5250 goto err_uninit;
5251
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252 if (dev->iflink == -1)
5253 dev->iflink = dev->ifindex;
5254
Michał Mirosław5455c692011-02-15 16:59:17 +00005255 /* Transfer changeable features to wanted_features and enable
5256 * software offloads (GSO and GRO).
5257 */
5258 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00005259 dev->features |= NETIF_F_SOFT_FEATURES;
5260 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005262 /* Turn on no cache copy if HW is doing checksum */
Michał Mirosław34324dc2011-11-15 15:29:55 +00005263 if (!(dev->flags & IFF_LOOPBACK)) {
5264 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5265 if (dev->features & NETIF_F_ALL_CSUM) {
5266 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5267 dev->features |= NETIF_F_NOCACHE_COPY;
5268 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005269 }
5270
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005271 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00005272 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005273 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00005274
Pravin B Shelaree579672013-03-07 09:28:08 +00005275 /* Make NETIF_F_SG inheritable to tunnel devices.
5276 */
5277 dev->hw_enc_features |= NETIF_F_SG;
5278
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005279 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5280 ret = notifier_to_errno(ret);
5281 if (ret)
5282 goto err_uninit;
5283
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005284 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005285 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005286 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005287 dev->reg_state = NETREG_REGISTERED;
5288
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005289 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00005290
Linus Torvalds1da177e2005-04-16 15:20:36 -07005291 /*
5292 * Default initial state at registry is that the
5293 * device is present.
5294 */
5295
5296 set_bit(__LINK_STATE_PRESENT, &dev->state);
5297
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01005298 linkwatch_init_dev(dev);
5299
Linus Torvalds1da177e2005-04-16 15:20:36 -07005300 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005301 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005302 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005303 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304
Jiri Pirko948b3372013-01-08 01:38:25 +00005305 /* If the device has permanent device address, driver should
5306 * set dev_addr and also addr_assign_type should be set to
5307 * NET_ADDR_PERM (default value).
5308 */
5309 if (dev->addr_assign_type == NET_ADDR_PERM)
5310 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5311
Linus Torvalds1da177e2005-04-16 15:20:36 -07005312 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005313 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005314 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005315 if (ret) {
5316 rollback_registered(dev);
5317 dev->reg_state = NETREG_UNREGISTERED;
5318 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005319 /*
5320 * Prevent userspace races by waiting until the network
5321 * device is fully setup before sending notifications.
5322 */
Patrick McHardya2835762010-02-26 06:34:51 +00005323 if (!dev->rtnl_link_ops ||
5324 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5325 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005326
5327out:
5328 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005329
5330err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005331 if (dev->netdev_ops->ndo_uninit)
5332 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005333 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005334}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005335EXPORT_SYMBOL(register_netdevice);

/**
 *	init_dummy_netdev - init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	number of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
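
/*
 * Example (illustrative sketch): a driver with one probe-time object but
 * several RX engines can attach all of its NAPI contexts to a single
 * dummy netdev.  struct foo_adapter and foo_poll() are hypothetical.
 *
 *	static int foo_setup_napi(struct foo_adapter *adap)
 *	{
 *		init_dummy_netdev(&adap->napi_dev);
 *		netif_napi_add(&adap->napi_dev, &adap->napi, foo_poll, 64);
 *		napi_enable(&adap->napi);
 *		return 0;
 *	}
 *
 * The dummy netdev is never registered, so it must never be passed to
 * register_netdevice()/unregister_netdevice(); reg_state stays
 * NETREG_DUMMY precisely to catch that mistake.
 */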


/**
 *	register_netdev - register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
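
/*
 * Example (illustrative sketch): the classic probe-time sequence built
 * from alloc_etherdev(), register_netdev() and free_netdev().  The
 * foo_priv and foo_netdev_ops names are assumed to exist in the driver.
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &foo_netdev_ops;
 *	err = register_netdev(dev);	// takes rtnl_lock internally
 *	if (err) {
 *		free_netdev(dev);	// safe: not yet registered
 *		return err;
 *	}
 *
 * Note the asymmetry: before register_netdev() succeeds the driver frees
 * the device itself; afterwards unregister_netdev() + free_netdev() is
 * the teardown pair.
 */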
5401
Eric Dumazet29b44332010-10-11 10:22:12 +00005402int netdev_refcnt_read(const struct net_device *dev)
5403{
5404 int i, refcnt = 0;
5405
5406 for_each_possible_cpu(i)
5407 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5408 return refcnt;
5409}
5410EXPORT_SYMBOL(netdev_refcnt_read);
5411
Ben Hutchings2c530402012-07-10 10:55:09 +00005412/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005413 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00005414 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005415 *
5416 * This is called when unregistering network devices.
5417 *
5418 * Any protocol or device that holds a reference should register
5419 * for netdevice notification, and cleanup and put back the
5420 * reference if they receive an UNREGISTER event.
5421 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005422 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005423 */
5424static void netdev_wait_allrefs(struct net_device *dev)
5425{
5426 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00005427 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005428
Eric Dumazete014deb2009-11-17 05:59:21 +00005429 linkwatch_forget_dev(dev);
5430
Linus Torvalds1da177e2005-04-16 15:20:36 -07005431 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00005432 refcnt = netdev_refcnt_read(dev);
5433
5434 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005436 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005437
5438 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005439 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440
Eric Dumazet748e2d92012-08-22 21:50:59 +00005441 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005442 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00005443 rtnl_lock();
5444
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005445 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005446 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5447 &dev->state)) {
5448 /* We must not have linkwatch events
5449 * pending on unregister. If this
5450 * happens, we simply run the queue
5451 * unscheduled, resulting in a noop
5452 * for this device.
5453 */
5454 linkwatch_run_queue();
5455 }
5456
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005457 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005458
5459 rebroadcast_time = jiffies;
5460 }
5461
5462 msleep(250);
5463
Eric Dumazet29b44332010-10-11 10:22:12 +00005464 refcnt = netdev_refcnt_read(dev);
5465
Linus Torvalds1da177e2005-04-16 15:20:36 -07005466 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005467 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5468 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005469 warning_time = jiffies;
5470 }
5471 }
5472}
5473
5474/* The sequence is:
5475 *
5476 * rtnl_lock();
5477 * ...
5478 * register_netdevice(x1);
5479 * register_netdevice(x2);
5480 * ...
5481 * unregister_netdevice(y1);
5482 * unregister_netdevice(y2);
5483 * ...
5484 * rtnl_unlock();
5485 * free_netdev(y1);
5486 * free_netdev(y2);
5487 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005488 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005489 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005490 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005491 * without deadlocking with linkwatch via keventd.
5492 * 2) Since we run with the RTNL semaphore not held, we can sleep
5493 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005494 *
5495 * We must not return until all unregister events added during
5496 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005497 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005498void netdev_run_todo(void)
5499{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005500 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005501
Linus Torvalds1da177e2005-04-16 15:20:36 -07005502 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005503 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005504
5505 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005506
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005507
5508 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00005509 if (!list_empty(&list))
5510 rcu_barrier();
5511
Linus Torvalds1da177e2005-04-16 15:20:36 -07005512 while (!list_empty(&list)) {
5513 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00005514 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005515 list_del(&dev->todo_list);
5516
Eric Dumazet748e2d92012-08-22 21:50:59 +00005517 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005518 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00005519 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00005520
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005521 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005522 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07005523 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005524 dump_stack();
5525 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005526 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005527
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005528 dev->reg_state = NETREG_UNREGISTERED;
5529
Changli Gao152102c2010-03-30 20:16:22 +00005530 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005531
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005532 netdev_wait_allrefs(dev);
5533
5534 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00005535 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00005536 WARN_ON(rcu_access_pointer(dev->ip_ptr));
5537 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005538 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005539
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005540 if (dev->destructor)
5541 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005542
5543 /* Free network device */
5544 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005545 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005546}
5547
Ben Hutchings3cfde792010-07-09 09:11:52 +00005548/* Convert net_device_stats to rtnl_link_stats64. They have the same
5549 * fields in the same order, with only the type differing.
5550 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00005551void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5552 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00005553{
5554#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00005555 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5556 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00005557#else
5558 size_t i, n = sizeof(*stats64) / sizeof(u64);
5559 const unsigned long *src = (const unsigned long *)netdev_stats;
5560 u64 *dst = (u64 *)stats64;
5561
5562 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5563 sizeof(*stats64) / sizeof(u64));
5564 for (i = 0; i < n; i++)
5565 dst[i] = src[i];
5566#endif
5567}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00005568EXPORT_SYMBOL(netdev_stats_to_stats64);
Ben Hutchings3cfde792010-07-09 09:11:52 +00005569
Eric Dumazetd83345a2009-11-16 03:36:51 +00005570/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005571 * dev_get_stats - get network device statistics
5572 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07005573 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005574 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00005575 * Get network statistics from device. Return @storage.
5576 * The device driver may provide its own method by setting
5577 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5578 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005579 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00005580struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5581 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005582{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005583 const struct net_device_ops *ops = dev->netdev_ops;
5584
Eric Dumazet28172732010-07-07 14:58:56 -07005585 if (ops->ndo_get_stats64) {
5586 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00005587 ops->ndo_get_stats64(dev, storage);
5588 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00005589 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00005590 } else {
5591 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07005592 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00005593 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07005594 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07005595}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005596EXPORT_SYMBOL(dev_get_stats);
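
/*
 * Example (illustrative sketch): a driver-side ndo_get_stats64
 * implementation of the kind dev_get_stats() calls into.  foo_priv and
 * its u64_stats-protected counters are hypothetical.
 *
 *	static struct rtnl_link_stats64 *
 *	foo_get_stats64(struct net_device *dev,
 *			struct rtnl_link_stats64 *stats)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *		unsigned int start;
 *
 *		do {
 *			start = u64_stats_fetch_begin_bh(&priv->syncp);
 *			stats->rx_packets = priv->rx_packets;
 *			stats->rx_bytes   = priv->rx_bytes;
 *		} while (u64_stats_fetch_retry_bh(&priv->syncp, start));
 *		return stats;
 *	}
 *
 * dev_get_stats() zeroes *storage before the call, so the driver only
 * needs to fill in the counters it actually maintains.
 */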

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
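
/*
 * Example (illustrative sketch): a bus or MAC-core layer installing
 * fallback ethtool ops without clobbering ops a hardware driver may
 * already have set.  foo_core_ethtool_ops is an assumed name.
 *
 *	static const struct ethtool_ops foo_core_ethtool_ops = {
 *		.get_link = ethtool_op_get_link,
 *	};
 *
 *	static void foo_core_attach(struct net_device *dev)
 *	{
 *		// no-op if dev->ethtool_ops was already set by the driver
 *		netdev_set_default_ethtool_ops(dev, &foo_core_ethtool_ops);
 *	}
 */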
5625
Linus Torvalds1da177e2005-04-16 15:20:36 -07005626/**
Tom Herbert36909ea2011-01-09 19:36:31 +00005627 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005628 * @sizeof_priv: size of private data to allocate space for
5629 * @name: device name format string
5630 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00005631 * @txqs: the number of TX subqueues to allocate
5632 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005633 *
5634 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005635 * and performs basic initialization. Also allocates subquue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00005636 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005637 */
Tom Herbert36909ea2011-01-09 19:36:31 +00005638struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5639 void (*setup)(struct net_device *),
5640 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005641{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005642 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005643 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005644 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005645
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005646 BUG_ON(strlen(name) >= sizeof(dev->name));
5647
Tom Herbert36909ea2011-01-09 19:36:31 +00005648 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005649 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00005650 return NULL;
5651 }
5652
Tom Herbert36909ea2011-01-09 19:36:31 +00005653#ifdef CONFIG_RPS
5654 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005655 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00005656 return NULL;
5657 }
5658#endif
5659
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005660 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005661 if (sizeof_priv) {
5662 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005663 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005664 alloc_size += sizeof_priv;
5665 }
5666 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005667 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005668
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005669 p = kzalloc(alloc_size, GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005670 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005672
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005673 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005674 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005675
Eric Dumazet29b44332010-10-11 10:22:12 +00005676 dev->pcpu_refcnt = alloc_percpu(int);
5677 if (!dev->pcpu_refcnt)
Tom Herberte6484932010-10-18 18:04:39 +00005678 goto free_p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005679
Linus Torvalds1da177e2005-04-16 15:20:36 -07005680 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00005681 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005682
Jiri Pirko22bedad32010-04-01 21:22:57 +00005683 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005684 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00005685
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005686 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005687
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005688 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00005689 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005690
Herbert Xud565b0a2008-12-15 23:38:52 -08005691 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005692 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00005693 INIT_LIST_HEAD(&dev->link_watch_list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005694 INIT_LIST_HEAD(&dev->upper_dev_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005695 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005696 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08005697
5698 dev->num_tx_queues = txqs;
5699 dev->real_num_tx_queues = txqs;
5700 if (netif_alloc_netdev_queues(dev))
5701 goto free_all;
5702
5703#ifdef CONFIG_RPS
5704 dev->num_rx_queues = rxqs;
5705 dev->real_num_rx_queues = rxqs;
5706 if (netif_alloc_rx_queues(dev))
5707 goto free_all;
5708#endif
5709
Linus Torvalds1da177e2005-04-16 15:20:36 -07005710 strcpy(dev->name, name);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005711 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00005712 if (!dev->ethtool_ops)
5713 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005714 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005715
David S. Miller8d3bdbd2011-02-08 15:02:50 -08005716free_all:
5717 free_netdev(dev);
5718 return NULL;
5719
Eric Dumazet29b44332010-10-11 10:22:12 +00005720free_pcpu:
5721 free_percpu(dev->pcpu_refcnt);
Tom Herberted9af2e2010-11-09 10:47:30 +00005722 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00005723#ifdef CONFIG_RPS
5724 kfree(dev->_rx);
5725#endif
5726
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005727free_p:
5728 kfree(p);
5729 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005730}
Tom Herbert36909ea2011-01-09 19:36:31 +00005731EXPORT_SYMBOL(alloc_netdev_mqs);
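
/*
 * Example (illustrative sketch): allocating a multiqueue Ethernet-style
 * device with 8 TX and 8 RX queues.  ether_setup() is the usual setup
 * callback; struct foo_priv is a hypothetical driver-private type.
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 *
 * The "%d" in the name format is expanded via dev_get_valid_name() when
 * the device is later registered.
 */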

/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 *	synchronize_net - Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If @head is not NULL, the device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */

void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
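
/*
 * Example (illustrative sketch): batching the teardown of several
 * devices under one rtnl_lock()/rtnl_unlock() cycle, which amortizes
 * the RCU grace periods taken by rollback_registered_many().  The
 * adapter/port structures are hypothetical.
 *
 *	LIST_HEAD(kill_list);
 *	struct foo_port *port;
 *
 *	rtnl_lock();
 *	list_for_each_entry(port, &adapter->ports, list)
 *		unregister_netdevice_queue(port->dev, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */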

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore. In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
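
/*
 * Example (illustrative sketch): a remove() path mirroring the
 * register_netdev() example earlier; foo_remove() is hypothetical.
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		unregister_netdev(dev);	// waits for refs via the todo list
 *		free_netdev(dev);	// releases the final reference
 *	}
 */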

/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
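
/*
 * Example (illustrative sketch): moving a device into another namespace
 * from code that already holds RTNL; how "netns" was looked up is elided.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, netns, "eth%d");
 *	rtnl_unlock();
 *
 * The "eth%d" pattern is only consulted if dev's current name is already
 * taken in the destination namespace.
 */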

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}


/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all. Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
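
/*
 * Example (illustrative sketch): how an aggregating driver (bonding /
 * team style) might recompute its feature set when slaves come and go.
 * foo_master, its slave list and FOO_VLAN_FEATURES are hypothetical.
 *
 *	static void foo_compute_features(struct foo_master *master)
 *	{
 *		netdev_features_t features = FOO_VLAN_FEATURES;
 *		struct foo_slave *slave;
 *
 *		list_for_each_entry(slave, &master->slaves, list)
 *			features = netdev_increment_features(features,
 *					slave->dev->vlan_features,
 *					FOO_VLAN_FEATURES);
 *		master->dev->vlan_features = features;
 *		netdev_change_features(master->dev);
 *	}
 *
 * netdev_change_features() is used rather than netdev_update_features()
 * so the recomputed vlan_features propagate to stacked VLAN devices.
 */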

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent) {
		r = dev_printk_emit(level[1] - '0',
				    dev->dev.parent,
				    "%s %s %s: %pV",
				    dev_driver_string(dev->dev.parent),
				    dev_name(dev->dev.parent),
				    netdev_name(dev), vaf);
	} else if (dev) {
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	} else {
		r = printk("%s(NULL net_device): %pV", level, vaf);
	}

	return r;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)		\
{									\
	int r;								\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	r = __netdev_printk(level, dev, &vaf);				\
									\
	va_end(args);							\
									\
	return r;							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
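
/*
 * Example (illustrative sketch): the generated helpers in use.  Output
 * is prefixed with the parent driver, bus id and interface name, e.g.
 * "foo 0000:01:00.0 eth0: link is up" (names assumed for illustration).
 *
 *	netdev_info(dev, "link is up, %u Mbps\n", speed);
 *	netdev_err(dev, "TX queue %d stalled\n", queue_index);
 */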

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	list_del(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
		sd->flow_limit = NULL;
#endif
	}

	dev_boot_phase = 0;

	/* The loopback device is special. If any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices, ensuring the loopback device
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);