/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/static_key.h>
#include <net/flow_keys.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all CPUs
 *	in the middle of receiving packets will see the new packet type
 *	(until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
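
/*
 * Usage sketch (illustrative, not part of the original file): a minimal
 * ETH_P_ALL tap.  The handler name and body are hypothetical; only the
 * packet_type fields and the registration calls mirror this file's API.
 * Remember the --ANK rule above: taps may see cloned skbs and must not
 * write to them.
 *
 *	static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *				   struct packet_type *pt,
 *				   struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_tap __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),
 *		.func = example_tap_rcv,
 *	};
 *
 *	dev_add_pack(&example_tap);
 *	...
 *	dev_remove_pack(&example_tap);
 */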

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476
Vlad Yasevich62532da2012-11-15 08:49:10 +0000477
478/**
479 * dev_add_offload - register offload handlers
480 * @po: protocol offload declaration
481 *
482 * Add protocol offload handlers to the networking stack. The passed
483 * &proto_offload is linked into kernel lists and may not be freed until
484 * it has been removed from the kernel lists.
485 *
486 * This call does not sleep therefore it can not
487 * guarantee all CPU's that are in middle of receiving packets
488 * will see the new offload handlers (until the next received packet).
489 */
490void dev_add_offload(struct packet_offload *po)
491{
492 struct list_head *head = &offload_base;
493
494 spin_lock(&offload_lock);
495 list_add_rcu(&po->list, head);
496 spin_unlock(&offload_lock);
497}
498EXPORT_SYMBOL(dev_add_offload);
499
500/**
501 * __dev_remove_offload - remove offload handler
502 * @po: packet offload declaration
503 *
504 * Remove a protocol offload handler that was previously added to the
505 * kernel offload handlers by dev_add_offload(). The passed &offload_type
506 * is removed from the kernel lists and can be freed or reused once this
507 * function returns.
508 *
509 * The packet type might still be in use by receivers
510 * and must not be freed until after all the CPU's have gone
511 * through a quiescent state.
512 */
513void __dev_remove_offload(struct packet_offload *po)
514{
515 struct list_head *head = &offload_base;
516 struct packet_offload *po1;
517
Eric Dumazetc53aa502012-11-16 08:08:23 +0000518 spin_lock(&offload_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000519
520 list_for_each_entry(po1, head, list) {
521 if (po == po1) {
522 list_del_rcu(&po->list);
523 goto out;
524 }
525 }
526
527 pr_warn("dev_remove_offload: %p not found\n", po);
528out:
Eric Dumazetc53aa502012-11-16 08:08:23 +0000529 spin_unlock(&offload_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000530}
531EXPORT_SYMBOL(__dev_remove_offload);
532
533/**
534 * dev_remove_offload - remove packet offload handler
535 * @po: packet offload declaration
536 *
537 * Remove a packet offload handler that was previously added to the kernel
538 * offload handlers by dev_add_offload(). The passed &offload_type is
539 * removed from the kernel lists and can be freed or reused once this
540 * function returns.
541 *
542 * This call sleeps to guarantee that no CPU is looking at the packet
543 * type after return.
544 */
545void dev_remove_offload(struct packet_offload *po)
546{
547 __dev_remove_offload(po);
548
549 synchronize_net();
550}
551EXPORT_SYMBOL(dev_remove_offload);
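
/*
 * Usage sketch (a guess at typical use, not part of the original file):
 * registering GRO/GSO callbacks for a protocol.  The callback names are
 * hypothetical placeholders; the field layout assumes the &packet_offload
 * definition this file is built against.
 *
 *	static struct packet_offload example_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment  = example_gso_segment,
 *			.gro_receive  = example_gro_receive,
 *			.gro_complete = example_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&example_offload);
 *	...
 *	dev_remove_offload(&example_offload);
 */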
552
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553/******************************************************************************
554
555 Device Boot-time Settings Routines
556
557*******************************************************************************/
558
559/* Boot time configuration table */
560static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
561
562/**
563 * netdev_boot_setup_add - add new setup entry
564 * @name: name of the device
565 * @map: configured settings for the device
566 *
567 * Adds new setup entry to the dev_boot_setup list. The function
568 * returns 0 on error and 1 on success. This is a generic routine to
569 * all netdevices.
570 */
571static int netdev_boot_setup_add(char *name, struct ifmap *map)
572{
573 struct netdev_boot_setup *s;
574 int i;
575
576 s = dev_boot_setup;
577 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
578 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
579 memset(s[i].name, 0, sizeof(s[i].name));
Wang Chen93b3cff2008-07-01 19:57:19 -0700580 strlcpy(s[i].name, name, IFNAMSIZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700581 memcpy(&s[i].map, map, sizeof(s[i].map));
582 break;
583 }
584 }
585
586 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
587}
588
589/**
590 * netdev_boot_setup_check - check boot time settings
591 * @dev: the netdevice
592 *
593 * Check boot time settings for the device.
594 * The found settings are set for the device to be used
595 * later in the device probing.
596 * Returns 0 if no settings found, 1 if they are.
597 */
598int netdev_boot_setup_check(struct net_device *dev)
599{
600 struct netdev_boot_setup *s = dev_boot_setup;
601 int i;
602
603 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
604 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
Wang Chen93b3cff2008-07-01 19:57:19 -0700605 !strcmp(dev->name, s[i].name)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606 dev->irq = s[i].map.irq;
607 dev->base_addr = s[i].map.base_addr;
608 dev->mem_start = s[i].map.mem_start;
609 dev->mem_end = s[i].map.mem_end;
610 return 1;
611 }
612 }
613 return 0;
614}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700615EXPORT_SYMBOL(netdev_boot_setup_check);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700616
617
618/**
619 * netdev_boot_base - get address from boot time settings
620 * @prefix: prefix for network device
621 * @unit: id for network device
622 *
623 * Check boot time settings for the base address of device.
624 * The found settings are set for the device to be used
625 * later in the device probing.
626 * Returns 0 if no settings found.
627 */
628unsigned long netdev_boot_base(const char *prefix, int unit)
629{
630 const struct netdev_boot_setup *s = dev_boot_setup;
631 char name[IFNAMSIZ];
632 int i;
633
634 sprintf(name, "%s%d", prefix, unit);
635
636 /*
637 * If device already registered then return base of 1
638 * to indicate not to probe for this interface
639 */
Eric W. Biederman881d9662007-09-17 11:56:21 -0700640 if (__dev_get_by_name(&init_net, name))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700641 return 1;
642
643 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
644 if (!strcmp(name, s[i].name))
645 return s[i].map.base_addr;
646 return 0;
647}
648
649/*
650 * Saves at boot time configured settings for any netdevice.
651 */
652int __init netdev_boot_setup(char *str)
653{
654 int ints[5];
655 struct ifmap map;
656
657 str = get_options(str, ARRAY_SIZE(ints), ints);
658 if (!str || !*str)
659 return 0;
660
661 /* Save settings */
662 memset(&map, 0, sizeof(map));
663 if (ints[0] > 0)
664 map.irq = ints[1];
665 if (ints[0] > 1)
666 map.base_addr = ints[2];
667 if (ints[0] > 2)
668 map.mem_start = ints[3];
669 if (ints[0] > 3)
670 map.mem_end = ints[4];
671
672 /* Add new entry to the list */
673 return netdev_boot_setup_add(str, &map);
674}
675
676__setup("netdev=", netdev_boot_setup);
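
/*
 * Example (illustrative, not part of the original file): as the parsing
 * above shows, "netdev=" takes up to four integers - irq, base_addr,
 * mem_start, mem_end - followed by the interface name, e.g.
 *
 *	netdev=9,0x300,0,0,eth1
 *
 * which netdev_boot_setup_check() later applies (IRQ 9, I/O base 0x300)
 * to the device that registers as "eth1".
 */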

/*******************************************************************************

		Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
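
/*
 * Usage sketch (illustrative, not part of the original file): the
 * refcounted lookup must be paired with dev_put(); the _rcu variant is
 * only valid inside an RCU read-side section.  "eth0" is a placeholder.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */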

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
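
/*
 * Usage sketch (illustrative, not part of the original file): resolve an
 * ifindex, e.g. one taken from a netlink request, without touching the
 * refcount.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		... all uses must finish before rcu_read_unlock() ...
 *	rcu_read_unlock();
 */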

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
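
/*
 * Usage sketch (illustrative, not part of the original file): look up an
 * Ethernet device by MAC address; the address bytes are placeholders.
 *
 *	static const char addr[ETH_ALEN] =
 *		{0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, addr);
 *	...
 *	rcu_read_unlock();
 */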

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);
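
/*
 * Usage sketch (illustrative, not part of the original file): find any
 * interface that is both up and running by matching flags under the same
 * mask, inside an RCU read-side section.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_flags_rcu(net, IFF_UP | IFF_RUNNING,
 *				   IFF_UP | IFF_RUNNING);
 *	rcu_read_unlock();
 */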

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
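
/*
 * Examples (illustrative, not part of the original file):
 *
 *	dev_valid_name("eth0")    -> true
 *	dev_valid_name("wan%d")   -> true  ('%' is expanded later by
 *					    dev_alloc_name(), not rejected here)
 *	dev_valid_name("")        -> false
 *	dev_valid_name("..")      -> false
 *	dev_valid_name("my dev")  -> false (whitespace)
 *	dev_valid_name("a/b")     -> false (would break the sysfs path)
 */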

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
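
/*
 * Worked example (illustrative, not part of the original file): if "vlan0"
 * and "vlan1" are already registered in this namespace, then
 *
 *	err = dev_alloc_name(dev, "vlan%d");
 *
 * scans the free map above, returns 2, and leaves "vlan2" in dev->name.
 * A bad format string yields -EINVAL and a full map yields -ENFILE.
 */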

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device; can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
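
/*
 * Usage sketch (illustrative, not part of the original file): renaming a
 * device.  The caller must hold RTNL, the device must be down (-EBUSY
 * otherwise), and "wan%d" demonstrates the wildcard form noted above.
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "wan%d");
 *	rtnl_unlock();
 */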

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}
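
/*
 * Usage sketch (illustrative, not part of the original file): setting and
 * clearing an alias under RTNL; the alias text is a placeholder.
 *
 *	err = dev_set_alias(dev, "uplink-to-core1",
 *			    strlen("uplink-to-core1"));
 *	...
 *	err = dev_set_alias(dev, NULL, 0);	(len == 0 frees the alias)
 */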
1181
1182
1183/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001184 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001185 * @dev: device to cause notification
1186 *
1187 * Called to indicate a device has changed features.
1188 */
1189void netdev_features_change(struct net_device *dev)
1190{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001191 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001192}
1193EXPORT_SYMBOL(netdev_features_change);
1194
1195/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196 * netdev_state_change - device changes state
1197 * @dev: device to cause notification
1198 *
1199 * Called to indicate a device has changed state. This function calls
1200 * the notifier chains for netdev_chain and sends a NEWLINK message
1201 * to the routing socket.
1202 */
1203void netdev_state_change(struct net_device *dev)
1204{
1205 if (dev->flags & IFF_UP) {
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001206 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1208 }
1209}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001210EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211
Amerigo Wangee89bab2012-08-09 22:14:56 +00001212/**
1213 * netdev_notify_peers - notify network peers about existence of @dev
1214 * @dev: network device
1215 *
1216 * Generate traffic such that interested network peers are aware of
1217 * @dev, such as by generating a gratuitous ARP. This may be used when
1218 * a device wants to inform the rest of the network about some sort of
1219 * reconfiguration such as a failover event or virtual machine
1220 * migration.
1221 */
1222void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001223{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001224 rtnl_lock();
1225 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1226 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001227}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001228EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001229
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230/**
1231 * dev_load - load a network module
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001232 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 * @name: name of interface
1234 *
1235 * If a network interface is not present and the process has suitable
1236 * privileges this function loads the module. If module loading is not
1237 * available in this kernel then it becomes a nop.
1238 */
1239
Eric W. Biederman881d9662007-09-17 11:56:21 -07001240void dev_load(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241{
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001242 struct net_device *dev;
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001243 int no_module;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244
Eric Dumazet72c95282009-10-30 07:11:27 +00001245 rcu_read_lock();
1246 dev = dev_get_by_name_rcu(net, name);
1247 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001249 no_module = !dev;
1250 if (no_module && capable(CAP_NET_ADMIN))
1251 no_module = request_module("netdev-%s", name);
1252 if (no_module && capable(CAP_SYS_MODULE)) {
1253 if (!request_module("%s", name))
Vinson Lee7cecb522012-06-27 14:32:07 +00001254 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1255 name);
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001256 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001258EXPORT_SYMBOL(dev_load);
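
/*
 * Illustrative sketch only (hypothetical helper): how an ioctl-style caller
 * might combine dev_load() with a fresh lookup. The caller must dev_put()
 * the returned device.
 */
static struct net_device *example_find_or_load(struct net *net,
					       const char *name)
{
	dev_load(net, name);			/* may request_module() */
	return dev_get_by_name(net, name);	/* takes a reference */
}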
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259
Patrick McHardybd380812010-02-26 06:34:53 +00001260static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001262 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001263 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001265 ASSERT_RTNL();
1266
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 if (!netif_device_present(dev))
1268 return -ENODEV;
1269
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001270 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1271 ret = notifier_to_errno(ret);
1272 if (ret)
1273 return ret;
1274
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001276
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001277 if (ops->ndo_validate_addr)
1278 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001279
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001280 if (!ret && ops->ndo_open)
1281 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282
Jeff Garzikbada3392007-10-23 20:19:37 -07001283 if (ret)
1284 clear_bit(__LINK_STATE_START, &dev->state);
1285 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 dev->flags |= IFF_UP;
David S. Millerb4bd07c2009-02-06 22:06:43 -08001287 net_dmaengine_get();
Patrick McHardy4417da62007-06-27 01:28:10 -07001288 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001290 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001292
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 return ret;
1294}
Patrick McHardybd380812010-02-26 06:34:53 +00001295
1296/**
1297 * dev_open - prepare an interface for use.
1298 * @dev: device to open
1299 *
1300 * Takes a device from down to up state. The device's private open
1301 * function is invoked and then the multicast lists are loaded. Finally
1302 * the device is moved into the up state and a %NETDEV_UP message is
1303 * sent to the netdev notifier chain.
1304 *
1305 * Calling this function on an active interface is a nop. On a failure
1306 * a negative errno code is returned.
1307 */
1308int dev_open(struct net_device *dev)
1309{
1310 int ret;
1311
Patrick McHardybd380812010-02-26 06:34:53 +00001312 if (dev->flags & IFF_UP)
1313 return 0;
1314
Patrick McHardybd380812010-02-26 06:34:53 +00001315 ret = __dev_open(dev);
1316 if (ret < 0)
1317 return ret;
1318
Patrick McHardybd380812010-02-26 06:34:53 +00001319 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1320 call_netdevice_notifiers(NETDEV_UP, dev);
1321
1322 return ret;
1323}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001324EXPORT_SYMBOL(dev_open);
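
/*
 * Illustrative sketch only (hypothetical helper): dev_open() and dev_close()
 * must run under the RTNL. An in-kernel caller bouncing an interface might
 * look like this.
 */
static int example_bounce_device(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);		/* nop if the device is already up */
	if (!err)
		dev_close(dev);		/* always returns 0 */
	rtnl_unlock();
	return err;
}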
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325
Octavian Purdila44345722010-12-13 12:44:07 +00001326static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327{
Octavian Purdila44345722010-12-13 12:44:07 +00001328 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001329
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001330 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001331 might_sleep();
1332
Octavian Purdila44345722010-12-13 12:44:07 +00001333 list_for_each_entry(dev, head, unreg_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001334 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335
Octavian Purdila44345722010-12-13 12:44:07 +00001336 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
Octavian Purdila44345722010-12-13 12:44:07 +00001338 /* Synchronize to the scheduled poll. We cannot touch the poll list; it
 1339 * can even be on a different cpu. So just clear netif_running().
1340 *
1341 * dev->stop() will invoke napi_disable() on all of it's
1342 * napi_struct instances on this device.
1343 */
1344 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1345 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
Octavian Purdila44345722010-12-13 12:44:07 +00001347 dev_deactivate_many(head);
1348
1349 list_for_each_entry(dev, head, unreg_list) {
1350 const struct net_device_ops *ops = dev->netdev_ops;
1351
1352 /*
1353 * Call the device-specific close. This cannot fail and is
1354 * only done if the device is UP.
1355 *
1356 * We allow it to be called even after a DETACH hot-plug
1357 * event.
1358 */
1359 if (ops->ndo_stop)
1360 ops->ndo_stop(dev);
1361
Octavian Purdila44345722010-12-13 12:44:07 +00001362 dev->flags &= ~IFF_UP;
Octavian Purdila44345722010-12-13 12:44:07 +00001363 net_dmaengine_put();
1364 }
1365
1366 return 0;
1367}
1368
1369static int __dev_close(struct net_device *dev)
1370{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001371 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001372 LIST_HEAD(single);
1373
1374 list_add(&dev->unreg_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001375 retval = __dev_close_many(&single);
1376 list_del(&single);
1377 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001378}
1379
Eric Dumazet3fbd8752011-01-19 21:23:22 +00001380static int dev_close_many(struct list_head *head)
Octavian Purdila44345722010-12-13 12:44:07 +00001381{
1382 struct net_device *dev, *tmp;
1383 LIST_HEAD(tmp_list);
1384
1385 list_for_each_entry_safe(dev, tmp, head, unreg_list)
1386 if (!(dev->flags & IFF_UP))
1387 list_move(&dev->unreg_list, &tmp_list);
1388
1389 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001390
Octavian Purdila44345722010-12-13 12:44:07 +00001391 list_for_each_entry(dev, head, unreg_list) {
1392 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1393 call_netdevice_notifiers(NETDEV_DOWN, dev);
1394 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395
Octavian Purdila44345722010-12-13 12:44:07 +00001396 /* rollback_registered_many needs the complete original list */
1397 list_splice(&tmp_list, head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 return 0;
1399}
Patrick McHardybd380812010-02-26 06:34:53 +00001400
1401/**
1402 * dev_close - shutdown an interface.
1403 * @dev: device to shutdown
1404 *
1405 * This function moves an active device into down state. A
1406 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1407 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1408 * chain.
1409 */
1410int dev_close(struct net_device *dev)
1411{
Eric Dumazete14a5992011-05-10 12:26:06 -07001412 if (dev->flags & IFF_UP) {
1413 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001414
Eric Dumazete14a5992011-05-10 12:26:06 -07001415 list_add(&dev->unreg_list, &single);
1416 dev_close_many(&single);
1417 list_del(&single);
1418 }
Patrick McHardybd380812010-02-26 06:34:53 +00001419 return 0;
1420}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001421EXPORT_SYMBOL(dev_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422
1423
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001424/**
1425 * dev_disable_lro - disable Large Receive Offload on a device
1426 * @dev: device
1427 *
1428 * Disable Large Receive Offload (LRO) on a net device. Must be
1429 * called under RTNL. This is needed if received packets may be
1430 * forwarded to another interface.
1431 */
1432void dev_disable_lro(struct net_device *dev)
1433{
Neil Hormanf11970e2011-05-24 08:31:09 +00001434 /*
1435 * If we're trying to disable lro on a vlan device
1436 * use the underlying physical device instead
1437 */
1438 if (is_vlan_dev(dev))
1439 dev = vlan_dev_real_dev(dev);
1440
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001441 dev->wanted_features &= ~NETIF_F_LRO;
1442 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001443
Michał Mirosław22d59692011-04-21 12:42:15 +00001444 if (unlikely(dev->features & NETIF_F_LRO))
1445 netdev_WARN(dev, "failed to disable LRO!\n");
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001446}
1447EXPORT_SYMBOL(dev_disable_lro);
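
/*
 * Illustrative sketch only (hypothetical helper): a forwarding-setup path
 * could walk every device in a namespace and turn LRO off, as required
 * above. The RTNL must already be held.
 */
static void example_disable_lro_all(struct net *net)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		dev_disable_lro(dev);
}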
1448
1449
Eric W. Biederman881d9662007-09-17 11:56:21 -07001450static int dev_boot_phase = 1;
1451
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452/**
1453 * register_netdevice_notifier - register a network notifier block
1454 * @nb: notifier
1455 *
1456 * Register a notifier to be called when network device events occur.
1457 * The notifier passed is linked into the kernel structures and must
1458 * not be reused until it has been unregistered. A negative errno code
1459 * is returned on a failure.
1460 *
1461 * When registered, all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001462 * to the new notifier to allow the device to have a race-free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 * view of the network device list.
1464 */
1465
1466int register_netdevice_notifier(struct notifier_block *nb)
1467{
1468 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001469 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001470 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 int err;
1472
1473 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001474 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001475 if (err)
1476 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001477 if (dev_boot_phase)
1478 goto unlock;
1479 for_each_net(net) {
1480 for_each_netdev(net, dev) {
1481 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1482 err = notifier_to_errno(err);
1483 if (err)
1484 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485
Eric W. Biederman881d9662007-09-17 11:56:21 -07001486 if (!(dev->flags & IFF_UP))
1487 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001488
Eric W. Biederman881d9662007-09-17 11:56:21 -07001489 nb->notifier_call(nb, NETDEV_UP, dev);
1490 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001492
1493unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 rtnl_unlock();
1495 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001496
1497rollback:
1498 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001499 for_each_net(net) {
1500 for_each_netdev(net, dev) {
1501 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001502 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001503
Eric W. Biederman881d9662007-09-17 11:56:21 -07001504 if (dev->flags & IFF_UP) {
1505 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1506 nb->notifier_call(nb, NETDEV_DOWN, dev);
1507 }
1508 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001509 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001510 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001511
RongQing.Li8f891482011-11-30 23:43:07 -05001512outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001513 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001514 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001516EXPORT_SYMBOL(register_netdevice_notifier);
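
/*
 * Illustrative sketch only (hypothetical names): a minimal netdevice
 * notifier. In this kernel the callback's last argument is the struct
 * net_device pointer itself, as the replay loop above shows. A module
 * would call register_netdevice_notifier(&example_netdev_notifier) from
 * its init path and unregister it on exit.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_info("%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};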
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517
1518/**
1519 * unregister_netdevice_notifier - unregister a network notifier block
1520 * @nb: notifier
1521 *
1522 * Unregister a notifier previously registered by
1523 * register_netdevice_notifier(). The notifier is unlinked from the
1524 * kernel structures and may then be reused. A negative errno code
1525 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001526 *
1527 * After unregistering, unregister and down device events are synthesized
1528 * for all devices on the device list and sent to the removed notifier,
1529 * removing the need for special-case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 */
1531
1532int unregister_netdevice_notifier(struct notifier_block *nb)
1533{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001534 struct net_device *dev;
1535 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001536 int err;
1537
1538 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001539 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001540 if (err)
1541 goto unlock;
1542
1543 for_each_net(net) {
1544 for_each_netdev(net, dev) {
1545 if (dev->flags & IFF_UP) {
1546 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1547 nb->notifier_call(nb, NETDEV_DOWN, dev);
1548 }
1549 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001550 }
1551 }
1552unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001553 rtnl_unlock();
1554 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001556EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557
1558/**
1559 * call_netdevice_notifiers - call all network notifier blocks
1560 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001561 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 *
1563 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001564 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 */
1566
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001567int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568{
Jiri Pirkoab930472010-04-20 01:45:37 -07001569 ASSERT_RTNL();
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001570 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001572EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
Ingo Molnarc5905af2012-02-24 08:31:31 +01001574static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001575#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001576/* We are not allowed to call static_key_slow_dec() from irq context
Eric Dumazetb90e5792011-11-28 11:16:50 +00001577 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001578 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001579 */
1580static atomic_t netstamp_needed_deferred;
1581#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
1583void net_enable_timestamp(void)
1584{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001585#ifdef HAVE_JUMP_LABEL
1586 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1587
1588 if (deferred) {
1589 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001590 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001591 return;
1592 }
1593#endif
1594 WARN_ON(in_interrupt());
Ingo Molnarc5905af2012-02-24 08:31:31 +01001595 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001597EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
1599void net_disable_timestamp(void)
1600{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001601#ifdef HAVE_JUMP_LABEL
1602 if (in_interrupt()) {
1603 atomic_inc(&netstamp_needed_deferred);
1604 return;
1605 }
1606#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001607 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001609EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
Eric Dumazet3b098e22010-05-15 23:57:10 -07001611static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612{
Eric Dumazet588f0332011-11-15 04:12:55 +00001613 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001614 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001615 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616}
1617
Eric Dumazet588f0332011-11-15 04:12:55 +00001618#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001619 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001620 if ((COND) && !(SKB)->tstamp.tv64) \
1621 __net_timestamp(SKB); \
1622 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001623
Richard Cochran4dc360c2011-10-19 17:00:35 -04001624static int net_hwtstamp_validate(struct ifreq *ifr)
1625{
1626 struct hwtstamp_config cfg;
1627 enum hwtstamp_tx_types tx_type;
1628 enum hwtstamp_rx_filters rx_filter;
1629 int tx_type_valid = 0;
1630 int rx_filter_valid = 0;
1631
1632 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1633 return -EFAULT;
1634
1635 if (cfg.flags) /* reserved for future extensions */
1636 return -EINVAL;
1637
1638 tx_type = cfg.tx_type;
1639 rx_filter = cfg.rx_filter;
1640
1641 switch (tx_type) {
1642 case HWTSTAMP_TX_OFF:
1643 case HWTSTAMP_TX_ON:
1644 case HWTSTAMP_TX_ONESTEP_SYNC:
1645 tx_type_valid = 1;
1646 break;
1647 }
1648
1649 switch (rx_filter) {
1650 case HWTSTAMP_FILTER_NONE:
1651 case HWTSTAMP_FILTER_ALL:
1652 case HWTSTAMP_FILTER_SOME:
1653 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1654 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1655 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1656 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1657 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1658 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1659 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1660 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1661 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1662 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1663 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1664 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1665 rx_filter_valid = 1;
1666 break;
1667 }
1668
1669 if (!tx_type_valid || !rx_filter_valid)
1670 return -ERANGE;
1671
1672 return 0;
1673}
1674
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001675static inline bool is_skb_forwardable(struct net_device *dev,
1676 struct sk_buff *skb)
1677{
1678 unsigned int len;
1679
1680 if (!(dev->flags & IFF_UP))
1681 return false;
1682
1683 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1684 if (skb->len <= len)
1685 return true;
1686
1687 /* if TSO is enabled, we don't care about the length as the packet
1688 * could be forwarded without first being segmented
1689 */
1690 if (skb_is_gso(skb))
1691 return true;
1692
1693 return false;
1694}
1695
Arnd Bergmann44540962009-11-26 06:07:08 +00001696/**
1697 * dev_forward_skb - loopback an skb to another netif
1698 *
1699 * @dev: destination network device
1700 * @skb: buffer to forward
1701 *
1702 * return values:
1703 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001704 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001705 *
1706 * dev_forward_skb can be used for injecting an skb from the
1707 * start_xmit function of one device into the receive queue
1708 * of another device.
1709 *
1710 * The receiving device may be in another namespace, so
1711 * we have to clear all information in the skb that could
1712 * impact namespace isolation.
1713 */
1714int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1715{
Michael S. Tsirkin48c83012011-08-31 08:03:29 +00001716 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1717 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1718 atomic_long_inc(&dev->rx_dropped);
1719 kfree_skb(skb);
1720 return NET_RX_DROP;
1721 }
1722 }
1723
Arnd Bergmann44540962009-11-26 06:07:08 +00001724 skb_orphan(skb);
Ben Greearc736eef2010-07-22 09:54:47 +00001725 nf_reset(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001726
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001727 if (unlikely(!is_skb_forwardable(dev, skb))) {
Eric Dumazetcaf586e2010-09-30 21:06:55 +00001728 atomic_long_inc(&dev->rx_dropped);
Eric Dumazet6ec82562010-05-06 00:53:53 -07001729 kfree_skb(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001730 return NET_RX_DROP;
Eric Dumazet6ec82562010-05-06 00:53:53 -07001731 }
Benjamin LaHaise3b9785c2012-03-27 15:55:44 +00001732 skb->skb_iif = 0;
David S. Miller59b99972012-05-10 23:03:34 -04001733 skb->dev = dev;
1734 skb_dst_drop(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001735 skb->tstamp.tv64 = 0;
1736 skb->pkt_type = PACKET_HOST;
1737 skb->protocol = eth_type_trans(skb, dev);
David S. Miller59b99972012-05-10 23:03:34 -04001738 skb->mark = 0;
1739 secpath_reset(skb);
1740 nf_reset(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001741 return netif_rx(skb);
1742}
1743EXPORT_SYMBOL_GPL(dev_forward_skb);
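
/*
 * Illustrative sketch only (hypothetical struct and function names): the
 * typical dev_forward_skb() caller is a paired virtual device whose
 * ndo_start_xmit hands each skb to its peer, veth-style.
 */
struct example_priv {
	struct net_device *peer;
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* scrubs namespace state and queues the skb via netif_rx() */
	if (dev_forward_skb(priv->peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}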
1744
Changli Gao71d9dec2010-12-15 19:57:25 +00001745static inline int deliver_skb(struct sk_buff *skb,
1746 struct packet_type *pt_prev,
1747 struct net_device *orig_dev)
1748{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001749 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1750 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001751 atomic_inc(&skb->users);
1752 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1753}
1754
Eric Leblondc0de08d2012-08-16 22:02:58 +00001755static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1756{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001757 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001758 return false;
1759
1760 if (ptype->id_match)
1761 return ptype->id_match(ptype, skb->sk);
1762 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1763 return true;
1764
1765 return false;
1766}
1767
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768/*
1769 * Support routine. Sends outgoing frames to any network
1770 * taps currently in use.
1771 */
1772
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001773static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774{
1775 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001776 struct sk_buff *skb2 = NULL;
1777 struct packet_type *pt_prev = NULL;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001778
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 rcu_read_lock();
1780 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1781 /* Never send packets back to the socket
1782 * they originated from - MvS (miquels@drinkel.ow.org)
1783 */
1784 if ((ptype->dev == dev || !ptype->dev) &&
Eric Leblondc0de08d2012-08-16 22:02:58 +00001785 (!skb_loop_sk(ptype, skb))) {
Changli Gao71d9dec2010-12-15 19:57:25 +00001786 if (pt_prev) {
1787 deliver_skb(skb2, pt_prev, skb->dev);
1788 pt_prev = ptype;
1789 continue;
1790 }
1791
1792 skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 if (!skb2)
1794 break;
1795
Eric Dumazet70978182010-12-20 21:22:51 +00001796 net_timestamp_set(skb2);
1797
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 /* skb->nh should be correctly
1799 set by the sender, so that the second statement is
1800 just protection against buggy protocols.
1801 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001802 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001804 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001805 skb2->network_header > skb2->tail) {
Joe Perchese87cc472012-05-13 21:56:26 +00001806 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1807 ntohs(skb2->protocol),
1808 dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001809 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 }
1811
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001812 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 skb2->pkt_type = PACKET_OUTGOING;
Changli Gao71d9dec2010-12-15 19:57:25 +00001814 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 }
1816 }
Changli Gao71d9dec2010-12-15 19:57:25 +00001817 if (pt_prev)
1818 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 rcu_read_unlock();
1820}
1821
Ben Hutchings2c530402012-07-10 10:55:09 +00001822/**
1823 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001824 * @dev: Network device
1825 * @txq: number of queues available
1826 *
1827 * If real_num_tx_queues is changed the tc mappings may no longer be
1828 * valid. To resolve this verify the tc mapping remains valid and if
1829 * not, NULL the mapping. With no priorities mapping to this
1830 * offset/count pair it will no longer be used. In the worst case, when
1831 * TC0 is invalid, nothing can be done, so priority mappings are
1832 * disabled. It is expected that drivers will fix this mapping, if they
1833 * can, before calling netif_set_real_num_tx_queues.
1834 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001835static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001836{
1837 int i;
1838 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1839
1840 /* If TC0 is invalidated disable TC mapping */
1841 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001842 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001843 dev->num_tc = 0;
1844 return;
1845 }
1846
1847 /* Invalidated prio to tc mappings set to TC0 */
1848 for (i = 1; i < TC_BITMASK + 1; i++) {
1849 int q = netdev_get_prio_tc_map(dev, i);
1850
1851 tc = &dev->tc_to_txq[q];
1852 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001853 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1854 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001855 netdev_set_prio_tc_map(dev, i, 0);
1856 }
1857 }
1858}
1859
Alexander Duyck537c00d2013-01-10 08:57:02 +00001860#ifdef CONFIG_XPS
1861static DEFINE_MUTEX(xps_map_mutex);
1862#define xmap_dereference(P) \
1863 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1864
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001865static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1866 int cpu, u16 index)
1867{
1868 struct xps_map *map = NULL;
1869 int pos;
1870
1871 if (dev_maps)
1872 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1873
1874 for (pos = 0; map && pos < map->len; pos++) {
1875 if (map->queues[pos] == index) {
1876 if (map->len > 1) {
1877 map->queues[pos] = map->queues[--map->len];
1878 } else {
1879 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1880 kfree_rcu(map, rcu);
1881 map = NULL;
1882 }
1883 break;
1884 }
1885 }
1886
1887 return map;
1888}
1889
Alexander Duyck537c00d2013-01-10 08:57:02 +00001890void netif_reset_xps_queue(struct net_device *dev, u16 index)
1891{
1892 struct xps_dev_maps *dev_maps;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001893 int cpu;
1894 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001895
1896 mutex_lock(&xps_map_mutex);
1897 dev_maps = xmap_dereference(dev->xps_maps);
1898
1899 if (!dev_maps)
1900 goto out_no_maps;
1901
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001902 for_each_possible_cpu(cpu) {
1903 if (remove_xps_queue(dev_maps, cpu, index))
1904 active = true;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001905 }
1906
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001907 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00001908 RCU_INIT_POINTER(dev->xps_maps, NULL);
1909 kfree_rcu(dev_maps, rcu);
1910 }
1911
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001912 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
1913 NUMA_NO_NODE);
Alexander Duyck537c00d2013-01-10 08:57:02 +00001914out_no_maps:
1915 mutex_unlock(&xps_map_mutex);
1916}
1917
Alexander Duyck01c5f862013-01-10 08:57:35 +00001918static struct xps_map *expand_xps_map(struct xps_map *map,
1919 int cpu, u16 index)
1920{
1921 struct xps_map *new_map;
1922 int alloc_len = XPS_MIN_MAP_ALLOC;
1923 int i, pos;
1924
1925 for (pos = 0; map && pos < map->len; pos++) {
1926 if (map->queues[pos] != index)
1927 continue;
1928 return map;
1929 }
1930
1931 /* Need to add queue to this CPU's existing map */
1932 if (map) {
1933 if (pos < map->alloc_len)
1934 return map;
1935
1936 alloc_len = map->alloc_len * 2;
1937 }
1938
1939 /* Need to allocate new map to store queue on this CPU's map */
1940 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1941 cpu_to_node(cpu));
1942 if (!new_map)
1943 return NULL;
1944
1945 for (i = 0; i < pos; i++)
1946 new_map->queues[i] = map->queues[i];
1947 new_map->alloc_len = alloc_len;
1948 new_map->len = pos;
1949
1950 return new_map;
1951}
1952
Alexander Duyck537c00d2013-01-10 08:57:02 +00001953int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
1954{
Alexander Duyck01c5f862013-01-10 08:57:35 +00001955 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001956 struct xps_map *map, *new_map;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001957 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001958 int cpu, numa_node_id = -2;
1959 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001960
1961 mutex_lock(&xps_map_mutex);
1962
1963 dev_maps = xmap_dereference(dev->xps_maps);
1964
Alexander Duyck01c5f862013-01-10 08:57:35 +00001965 /* allocate memory for queue storage */
1966 for_each_online_cpu(cpu) {
1967 if (!cpumask_test_cpu(cpu, mask))
1968 continue;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001969
Alexander Duyck01c5f862013-01-10 08:57:35 +00001970 if (!new_dev_maps)
1971 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
1972 if (!new_dev_maps)
1973 return -ENOMEM;
1974
1975 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1976 NULL;
1977
1978 map = expand_xps_map(map, cpu, index);
1979 if (!map)
1980 goto error;
1981
1982 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1983 }
1984
1985 if (!new_dev_maps)
1986 goto out_no_new_maps;
1987
1988 for_each_possible_cpu(cpu) {
1989 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1990 /* add queue to CPU maps */
1991 int pos = 0;
1992
1993 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1994 while ((pos < map->len) && (map->queues[pos] != index))
1995 pos++;
1996
1997 if (pos == map->len)
1998 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001999#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00002000 if (numa_node_id == -2)
2001 numa_node_id = cpu_to_node(cpu);
2002 else if (numa_node_id != cpu_to_node(cpu))
2003 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002004#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00002005 } else if (dev_maps) {
2006 /* fill in the new device map from the old device map */
2007 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2008 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002009 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002010
Alexander Duyck537c00d2013-01-10 08:57:02 +00002011 }
2012
Alexander Duyck01c5f862013-01-10 08:57:35 +00002013 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2014
Alexander Duyck537c00d2013-01-10 08:57:02 +00002015 /* Cleanup old maps */
Alexander Duyck01c5f862013-01-10 08:57:35 +00002016 if (dev_maps) {
2017 for_each_possible_cpu(cpu) {
2018 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2019 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2020 if (map && map != new_map)
2021 kfree_rcu(map, rcu);
2022 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002023
Alexander Duyck537c00d2013-01-10 08:57:02 +00002024 kfree_rcu(dev_maps, rcu);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002025 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002026
Alexander Duyck01c5f862013-01-10 08:57:35 +00002027 dev_maps = new_dev_maps;
2028 active = true;
2029
2030out_no_new_maps:
2031 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00002032 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2033 (numa_node_id >= 0) ? numa_node_id :
2034 NUMA_NO_NODE);
2035
Alexander Duyck01c5f862013-01-10 08:57:35 +00002036 if (!dev_maps)
2037 goto out_no_maps;
2038
2039 /* removes queue from unused CPUs */
2040 for_each_possible_cpu(cpu) {
2041 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2042 continue;
2043
2044 if (remove_xps_queue(dev_maps, cpu, index))
2045 active = true;
2046 }
2047
2048 /* free map if not active */
2049 if (!active) {
2050 RCU_INIT_POINTER(dev->xps_maps, NULL);
2051 kfree_rcu(dev_maps, rcu);
2052 }
2053
2054out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00002055 mutex_unlock(&xps_map_mutex);
2056
2057 return 0;
2058error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002059 /* remove any maps that we added */
2060 for_each_possible_cpu(cpu) {
2061 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2062 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2063 NULL;
2064 if (new_map && new_map != map)
2065 kfree(new_map);
2066 }
2067
Alexander Duyck537c00d2013-01-10 08:57:02 +00002068 mutex_unlock(&xps_map_mutex);
2069
Alexander Duyck537c00d2013-01-10 08:57:02 +00002070 kfree(new_dev_maps);
2071 return -ENOMEM;
2072}
2073EXPORT_SYMBOL(netif_set_xps_queue);
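
/*
 * Illustrative sketch only (hypothetical helper): pinning tx queue "index"
 * to a single CPU with netif_set_xps_queue(). The real caller builds the
 * mask from sysfs xps_cpus input.
 */
static int example_pin_queue_to_cpu(struct net_device *dev, u16 index, int cpu)
{
	cpumask_var_t mask;
	int err;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, mask);
	err = netif_set_xps_queue(dev, mask, index);
	free_cpumask_var(mask);
	return err;
}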
2074
2075#endif
John Fastabendf0796d52010-07-01 13:21:57 +00002076/*
2077 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2078 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2079 */
Tom Herberte6484932010-10-18 18:04:39 +00002080int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002081{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002082 int rc;
2083
Tom Herberte6484932010-10-18 18:04:39 +00002084 if (txq < 1 || txq > dev->num_tx_queues)
2085 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002086
Ben Hutchings5c565802011-02-15 19:39:21 +00002087 if (dev->reg_state == NETREG_REGISTERED ||
2088 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002089 ASSERT_RTNL();
2090
Tom Herbert1d24eb42010-11-21 13:17:27 +00002091 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2092 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002093 if (rc)
2094 return rc;
2095
John Fastabend4f57c082011-01-17 08:06:04 +00002096 if (dev->num_tc)
2097 netif_setup_tc(dev, txq);
2098
Tom Herberte6484932010-10-18 18:04:39 +00002099 if (txq < dev->real_num_tx_queues)
2100 qdisc_reset_all_tx_gt(dev, txq);
John Fastabendf0796d52010-07-01 13:21:57 +00002101 }
Tom Herberte6484932010-10-18 18:04:39 +00002102
2103 dev->real_num_tx_queues = txq;
2104 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002105}
2106EXPORT_SYMBOL(netif_set_real_num_tx_queues);
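
/*
 * Illustrative sketch only (hypothetical handler): an ethtool
 * .set_channels-style path resizing the active tx queue set at runtime.
 * The RTNL is already held on the ethtool path.
 */
static int example_set_tx_channels(struct net_device *dev, unsigned int count)
{
	/* flushes stale skbs on queues >= count and revalidates tc maps */
	return netif_set_real_num_tx_queues(dev, count);
}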
Denis Vlasenko56079432006-03-29 15:57:29 -08002107
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002108#ifdef CONFIG_RPS
2109/**
2110 * netif_set_real_num_rx_queues - set actual number of RX queues used
2111 * @dev: Network device
2112 * @rxq: Actual number of RX queues
2113 *
2114 * This must be called either with the rtnl_lock held or before
2115 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002116 * negative error code. If called before registration, it always
2117 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002118 */
2119int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2120{
2121 int rc;
2122
Tom Herbertbd25fa72010-10-18 18:00:16 +00002123 if (rxq < 1 || rxq > dev->num_rx_queues)
2124 return -EINVAL;
2125
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002126 if (dev->reg_state == NETREG_REGISTERED) {
2127 ASSERT_RTNL();
2128
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002129 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2130 rxq);
2131 if (rc)
2132 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002133 }
2134
2135 dev->real_num_rx_queues = rxq;
2136 return 0;
2137}
2138EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2139#endif
2140
Ben Hutchings2c530402012-07-10 10:55:09 +00002141/**
2142 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002143 *
2144 * This routine should set an upper limit on the number of RSS queues
2145 * used by default by multiqueue devices.
2146 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002147int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002148{
2149 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2150}
2151EXPORT_SYMBOL(netif_get_num_default_rss_queues);
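
/*
 * Illustrative sketch only (hypothetical names): a multiqueue driver's
 * probe path sizing both queue sets from the default RSS cap before use.
 */
#define EXAMPLE_MAX_QUEUES 16

static int example_setup_queues(struct net_device *dev)
{
	int n = min(EXAMPLE_MAX_QUEUES, netif_get_num_default_rss_queues());
	int err;

	err = netif_set_real_num_tx_queues(dev, n);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(dev, n);
}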
2152
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002153static inline void __netif_reschedule(struct Qdisc *q)
2154{
2155 struct softnet_data *sd;
2156 unsigned long flags;
2157
2158 local_irq_save(flags);
2159 sd = &__get_cpu_var(softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002160 q->next_sched = NULL;
2161 *sd->output_queue_tailp = q;
2162 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002163 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2164 local_irq_restore(flags);
2165}
2166
David S. Miller37437bb2008-07-16 02:15:04 -07002167void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002168{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002169 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2170 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002171}
2172EXPORT_SYMBOL(__netif_schedule);
2173
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002174void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002175{
David S. Miller3578b0c2010-08-03 00:24:04 -07002176 if (atomic_dec_and_test(&skb->users)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002177 struct softnet_data *sd;
2178 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08002179
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002180 local_irq_save(flags);
2181 sd = &__get_cpu_var(softnet_data);
2182 skb->next = sd->completion_queue;
2183 sd->completion_queue = skb;
2184 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2185 local_irq_restore(flags);
2186 }
Denis Vlasenko56079432006-03-29 15:57:29 -08002187}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002188EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08002189
2190void dev_kfree_skb_any(struct sk_buff *skb)
2191{
2192 if (in_irq() || irqs_disabled())
2193 dev_kfree_skb_irq(skb);
2194 else
2195 dev_kfree_skb(skb);
2196}
2197EXPORT_SYMBOL(dev_kfree_skb_any);
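
/*
 * Illustrative sketch only (hypothetical helper): a tx-completion handler
 * running in hard irq context must not call dev_kfree_skb() directly; it
 * uses dev_kfree_skb_irq() (or dev_kfree_skb_any()) to defer the free.
 */
static void example_complete_tx(struct sk_buff_head *done)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(done)) != NULL)
		dev_kfree_skb_irq(skb);	/* safe from hard irq context */
}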
2198
2199
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002200/**
2201 * netif_device_detach - mark device as removed
2202 * @dev: network device
2203 *
2204 * Mark the device as removed from the system and therefore no longer available.
2205 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002206void netif_device_detach(struct net_device *dev)
2207{
2208 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2209 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002210 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002211 }
2212}
2213EXPORT_SYMBOL(netif_device_detach);
2214
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002215/**
2216 * netif_device_attach - mark device as attached
2217 * @dev: network device
2218 *
2219 * Mark the device as attached to the system and restart it if needed.
2220 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002221void netif_device_attach(struct net_device *dev)
2222{
2223 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2224 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002225 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002226 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002227 }
2228}
2229EXPORT_SYMBOL(netif_device_attach);
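
/*
 * Illustrative sketch only (hypothetical functions): typical power
 * management usage, detaching the netdev before the hardware powers down
 * and reattaching it on resume.
 */
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stops all tx queues if running */
	/* ... put the hardware into a low-power state ... */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	/* ... bring the hardware back up ... */
	netif_device_attach(dev);	/* restarts queues and the watchdog */
	return 0;
}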
2230
Ben Hutchings36c92472012-01-17 07:57:56 +00002231static void skb_warn_bad_offload(const struct sk_buff *skb)
2232{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002233 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002234 struct net_device *dev = skb->dev;
2235 const char *driver = "";
2236
2237 if (dev && dev->dev.parent)
2238 driver = dev_driver_string(dev->dev.parent);
2239
2240 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2241 "gso_type=%d ip_summed=%d\n",
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002242 driver, dev ? &dev->features : &null_features,
2243 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002244 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2245 skb_shinfo(skb)->gso_type, skb->ip_summed);
2246}
2247
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248/*
2249 * Invalidate hardware checksum when packet is to be mangled, and
2250 * complete checksum manually on outgoing path.
2251 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002252int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253{
Al Virod3bc23e2006-11-14 21:24:49 -08002254 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002255 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256
Patrick McHardy84fa7932006-08-29 16:44:56 -07002257 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002258 goto out_set_summed;
2259
2260 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002261 skb_warn_bad_offload(skb);
2262 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 }
2264
Michał Mirosław55508d62010-12-14 15:24:08 +00002265 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002266 BUG_ON(offset >= skb_headlen(skb));
2267 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2268
2269 offset += skb->csum_offset;
2270 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2271
2272 if (skb_cloned(skb) &&
2273 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2275 if (ret)
2276 goto out;
2277 }
2278
Herbert Xua0308472007-10-15 01:47:15 -07002279 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002280out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002282out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 return ret;
2284}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002285EXPORT_SYMBOL(skb_checksum_help);
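
/*
 * Illustrative sketch only: a driver whose hardware cannot offload the
 * checksum for a given packet can fall back to skb_checksum_help() before
 * handing the skb to the nic, mirroring the fallback in
 * dev_hard_start_xmit() below. example_hw_can_csum() is a hypothetical
 * capability check.
 */
static int example_tx_csum(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (example_hw_can_csum(skb))
		return 0;

	return skb_checksum_help(skb);	/* complete the csum in software */
}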
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002287/**
2288 * skb_gso_segment - Perform segmentation on skb.
2289 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002290 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002291 *
2292 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002293 *
2294 * It may return NULL if the skb requires no segmentation. This is
2295 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002296 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002297struct sk_buff *skb_gso_segment(struct sk_buff *skb,
2298 netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002299{
2300 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
Vlad Yasevich22061d82012-11-15 08:49:11 +00002301 struct packet_offload *ptype;
Al Viro252e3342006-11-14 20:48:11 -08002302 __be16 type = skb->protocol;
Jesse Grossc8d5bcd2010-10-29 12:14:54 +00002303 int vlan_depth = ETH_HLEN;
Herbert Xua430a432006-07-08 13:34:56 -07002304 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002305
Jesse Grossc8d5bcd2010-10-29 12:14:54 +00002306 while (type == htons(ETH_P_8021Q)) {
2307 struct vlan_hdr *vh;
Jesse Gross7b9c6092010-10-20 13:56:04 +00002308
Jesse Grossc8d5bcd2010-10-29 12:14:54 +00002309 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
Jesse Gross7b9c6092010-10-20 13:56:04 +00002310 return ERR_PTR(-EINVAL);
2311
Jesse Grossc8d5bcd2010-10-29 12:14:54 +00002312 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2313 type = vh->h_vlan_encapsulated_proto;
2314 vlan_depth += VLAN_HLEN;
Jesse Gross7b9c6092010-10-20 13:56:04 +00002315 }
2316
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07002317 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002318 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002319 __skb_pull(skb, skb->mac_len);
2320
Herbert Xu67fd1a72009-01-19 16:26:44 -08002321 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002322 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002323
Herbert Xua430a432006-07-08 13:34:56 -07002324 if (skb_header_cloned(skb) &&
2325 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2326 return ERR_PTR(err);
2327 }
2328
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002329 rcu_read_lock();
Vlad Yasevich22061d82012-11-15 08:49:11 +00002330 list_for_each_entry_rcu(ptype, &offload_base, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002331 if (ptype->type == type && ptype->callbacks.gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07002332 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002333 err = ptype->callbacks.gso_send_check(skb);
Herbert Xua430a432006-07-08 13:34:56 -07002334 segs = ERR_PTR(err);
2335 if (err || skb_gso_ok(skb, features))
2336 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07002337 __skb_push(skb, (skb->data -
2338 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07002339 }
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002340 segs = ptype->callbacks.gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002341 break;
2342 }
2343 }
2344 rcu_read_unlock();
2345
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07002346 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07002347
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002348 return segs;
2349}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002350EXPORT_SYMBOL(skb_gso_segment);
2351
Herbert Xufb286bb2005-11-10 13:01:24 -08002352/* Take action when hardware reception checksum errors are detected. */
2353#ifdef CONFIG_BUG
2354void netdev_rx_csum_fault(struct net_device *dev)
2355{
2356 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002357 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002358 dump_stack();
2359 }
2360}
2361EXPORT_SYMBOL(netdev_rx_csum_fault);
2362#endif
2363
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364/* Actually, we should eliminate this check as soon as we know that:
 2365 * 1. IOMMU is present and allows mapping all the memory.
2366 * 2. No high memory really exists on this machine.
2367 */
2368
Eric Dumazet9092c652010-04-02 13:34:49 -07002369static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002371#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002373 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002374 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2375 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2376 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002377 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002378 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002379 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002381 if (PCI_DMA_BUS_IS_PHYS) {
2382 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383
Eric Dumazet9092c652010-04-02 13:34:49 -07002384 if (!pdev)
2385 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002386 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002387 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2388 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002389 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2390 return 1;
2391 }
2392 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002393#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 return 0;
2395}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002397struct dev_gso_cb {
2398 void (*destructor)(struct sk_buff *skb);
2399};
2400
2401#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2402
2403static void dev_gso_skb_destructor(struct sk_buff *skb)
2404{
2405 struct dev_gso_cb *cb;
2406
2407 do {
2408 struct sk_buff *nskb = skb->next;
2409
2410 skb->next = nskb->next;
2411 nskb->next = NULL;
2412 kfree_skb(nskb);
2413 } while (skb->next);
2414
2415 cb = DEV_GSO_CB(skb);
2416 if (cb->destructor)
2417 cb->destructor(skb);
2418}
2419
2420/**
2421 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2422 * @skb: buffer to segment
Jesse Gross91ecb632011-01-09 06:23:33 +00002423 * @features: device features as applicable to this skb
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002424 *
2425 * This function segments the given skb and stores the list of segments
2426 * in skb->next.
2427 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002428static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002429{
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002430 struct sk_buff *segs;
2431
Herbert Xu576a30e2006-06-27 13:22:38 -07002432 segs = skb_gso_segment(skb, features);
2433
2434 /* Verifying header integrity only. */
2435 if (!segs)
2436 return 0;
2437
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07002438 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002439 return PTR_ERR(segs);
2440
2441 skb->next = segs;
2442 DEV_GSO_CB(skb)->destructor = skb->destructor;
2443 skb->destructor = dev_gso_skb_destructor;
2444
2445 return 0;
2446}
2447
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002448static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
Jesse Gross03634662011-01-09 06:23:35 +00002449{
2450 return ((features & NETIF_F_GEN_CSUM) ||
2451 ((features & NETIF_F_V4_CSUM) &&
2452 protocol == htons(ETH_P_IP)) ||
2453 ((features & NETIF_F_V6_CSUM) &&
2454 protocol == htons(ETH_P_IPV6)) ||
2455 ((features & NETIF_F_FCOE_CRC) &&
2456 protocol == htons(ETH_P_FCOE)));
2457}
2458
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002459static netdev_features_t harmonize_features(struct sk_buff *skb,
2460 __be16 protocol, netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002461{
Ed Cashinc0d680e2012-09-19 15:49:00 +00002462 if (skb->ip_summed != CHECKSUM_NONE &&
2463 !can_checksum_protocol(features, protocol)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002464 features &= ~NETIF_F_ALL_CSUM;
2465 features &= ~NETIF_F_SG;
2466 } else if (illegal_highdma(skb->dev, skb)) {
2467 features &= ~NETIF_F_SG;
2468 }
2469
2470 return features;
2471}
2472
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002473netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002474{
2475 __be16 protocol = skb->protocol;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002476 netdev_features_t features = skb->dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002477
Ben Hutchings30b678d2012-07-30 15:57:00 +00002478 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2479 features &= ~NETIF_F_GSO_MASK;
2480
Jesse Gross58e998c2010-10-29 12:14:55 +00002481 if (protocol == htons(ETH_P_8021Q)) {
2482 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2483 protocol = veh->h_vlan_encapsulated_proto;
Jesse Grossf01a5232011-01-09 06:23:31 +00002484 } else if (!vlan_tx_tag_present(skb)) {
2485 return harmonize_features(skb, protocol, features);
2486 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002487
Jesse Gross6ee400a2011-01-17 20:46:00 +00002488 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
Jesse Grossf01a5232011-01-09 06:23:31 +00002489
2490 if (protocol != htons(ETH_P_8021Q)) {
2491 return harmonize_features(skb, protocol, features);
2492 } else {
2493 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
Jesse Gross6ee400a2011-01-17 20:46:00 +00002494 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
Jesse Grossf01a5232011-01-09 06:23:31 +00002495 return harmonize_features(skb, protocol, features);
2496 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002497}
Jesse Grossf01a5232011-01-09 06:23:31 +00002498EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002499
John Fastabend6afff0c2010-06-16 14:18:12 +00002500/*
2501 * Returns true if either:
2502 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
Rami Rosend1a53df2012-08-27 23:39:24 +00002503 * 2. skb is fragmented and the device does not support SG.
John Fastabend6afff0c2010-06-16 14:18:12 +00002504 */
2505static inline int skb_needs_linearize(struct sk_buff *skb,
Jesse Gross02932ce2011-01-09 06:23:34 +00002506 int features)
John Fastabend6afff0c2010-06-16 14:18:12 +00002507{
Jesse Gross02932ce2011-01-09 06:23:34 +00002508 return skb_is_nonlinear(skb) &&
2509 ((skb_has_frag_list(skb) &&
2510 !(features & NETIF_F_FRAGLIST)) ||
Jesse Grosse1e78db2010-10-29 12:14:53 +00002511 (skb_shinfo(skb)->nr_frags &&
Jesse Gross02932ce2011-01-09 06:23:34 +00002512 !(features & NETIF_F_SG)));
John Fastabend6afff0c2010-06-16 14:18:12 +00002513}
2514
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002515int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2516 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002517{
Stephen Hemminger00829822008-11-20 20:14:53 -08002518 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00002519 int rc = NETDEV_TX_OK;
Koki Sanagiec764bf2011-05-30 21:48:34 +00002520 unsigned int skb_len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002521
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002522 if (likely(!skb->next)) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002523 netdev_features_t features;
Jesse Grossfc741212011-01-09 06:23:32 +00002524
Eric Dumazet93f154b2009-05-18 22:19:19 -07002525 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002526 * If device doesn't need skb->dst, release it right now while
Eric Dumazet93f154b2009-05-18 22:19:19 -07002527 * it's hot in this cpu cache
2528 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002529 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2530 skb_dst_drop(skb);
2531
Jesse Grossfc741212011-01-09 06:23:32 +00002532 features = netif_skb_features(skb);
2533
Jesse Gross7b9c6092010-10-20 13:56:04 +00002534 if (vlan_tx_tag_present(skb) &&
Jesse Grossfc741212011-01-09 06:23:32 +00002535 !(features & NETIF_F_HW_VLAN_TX)) {
Jesse Gross7b9c6092010-10-20 13:56:04 +00002536 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2537 if (unlikely(!skb))
2538 goto out;
2539
2540 skb->vlan_tci = 0;
2541 }
2542
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002543 /* If encapsulation offload request, verify we are testing
2544 * hardware encapsulation features instead of standard
2545 * features for the netdev
2546 */
2547 if (skb->encapsulation)
2548 features &= dev->hw_enc_features;
2549
Jesse Grossfc741212011-01-09 06:23:32 +00002550 if (netif_needs_gso(skb, features)) {
Jesse Gross91ecb632011-01-09 06:23:33 +00002551 if (unlikely(dev_gso_segment(skb, features)))
David S. Miller9ccb8972010-04-22 01:02:07 -07002552 goto out_kfree_skb;
2553 if (skb->next)
2554 goto gso;
John Fastabend6afff0c2010-06-16 14:18:12 +00002555 } else {
Jesse Gross02932ce2011-01-09 06:23:34 +00002556 if (skb_needs_linearize(skb, features) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002557 __skb_linearize(skb))
2558 goto out_kfree_skb;
2559
2560 /* If packet is not checksummed and device does not
2561 * support checksumming for this protocol, complete
2562 * checksumming here.
2563 */
2564 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002565 if (skb->encapsulation)
2566 skb_set_inner_transport_header(skb,
2567 skb_checksum_start_offset(skb));
2568 else
2569 skb_set_transport_header(skb,
2570 skb_checksum_start_offset(skb));
Jesse Gross03634662011-01-09 06:23:35 +00002571 if (!(features & NETIF_F_ALL_CSUM) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002572 skb_checksum_help(skb))
2573 goto out_kfree_skb;
2574 }
David S. Miller9ccb8972010-04-22 01:02:07 -07002575 }
2576
Eric Dumazetb40863c2012-09-18 20:44:49 +00002577 if (!list_empty(&ptype_all))
2578 dev_queue_xmit_nit(skb, dev);
2579
Koki Sanagiec764bf2011-05-30 21:48:34 +00002580 skb_len = skb->len;
Patrick Ohlyac45f602009-02-12 05:03:37 +00002581 rc = ops->ndo_start_xmit(skb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002582 trace_net_dev_xmit(skb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002583 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07002584 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00002585 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002586 }
2587
Herbert Xu576a30e2006-06-27 13:22:38 -07002588gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002589 do {
2590 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002591
2592 skb->next = nskb->next;
2593 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00002594
2595 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002596 * If device doesn't need nskb->dst, release it right now while
Krishna Kumar068a2de2009-12-09 20:59:58 +00002597 * its hot in this cpu cache
2598 */
2599 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2600 skb_dst_drop(nskb);
2601
Eric Dumazetb40863c2012-09-18 20:44:49 +00002602 if (!list_empty(&ptype_all))
2603 dev_queue_xmit_nit(nskb, dev);
2604
Koki Sanagiec764bf2011-05-30 21:48:34 +00002605 skb_len = nskb->len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002606 rc = ops->ndo_start_xmit(nskb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002607 trace_net_dev_xmit(nskb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002608 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002609 if (rc & ~NETDEV_TX_MASK)
2610 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07002611 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002612 skb->next = nskb;
2613 return rc;
2614 }
Eric Dumazet08baf562009-05-25 22:58:01 -07002615 txq_trans_update(txq);
Tom Herbert734664982011-11-28 16:32:44 +00002616 if (unlikely(netif_xmit_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07002617 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002618 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002619
Patrick McHardy572a9d72009-11-10 06:14:14 +00002620out_kfree_gso_skb:
2621 if (likely(skb->next == NULL))
2622 skb->destructor = DEV_GSO_CB(skb)->destructor;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002623out_kfree_skb:
2624 kfree_skb(skb);
Jesse Gross7b9c6092010-10-20 13:56:04 +00002625out:
Patrick McHardy572a9d72009-11-10 06:14:14 +00002626 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002627}
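
/*
 * For orientation: both paths above end in the driver's ndo_start_xmit()
 * hook. A minimal sketch of such a hook follows; the foo_* names are
 * hypothetical, not a real driver API.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (foo_hw_queue_frame(priv, skb) < 0) {
 *			netif_stop_queue(dev);	// ring full, retry later
 *			return NETDEV_TX_BUSY;
 *		}
 *		return NETDEV_TX_OK;
 *	}
 *
 * NETDEV_TX_OK means the skb was consumed; return codes outside
 * NETDEV_TX_MASK make dev_hard_start_xmit() free the whole GSO chain.
 */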

static u32 hashrnd __read_mostly;

/*
 * Returns a Tx hash based on the given packet descriptor and the number of
 * Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
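
/*
 * A note on the mapping above: ((u64)hash * qcount) >> 32 scales a 32-bit
 * hash into [0, qcount) with a single multiply instead of a modulo.
 * Illustrative example (not from the source): with qcount = 8 and
 * hash = 0x80000000, the product is 0x400000000 and the shift yields
 * queue 4, i.e. a hash halfway through the 32-bit range lands halfway
 * through the queue range.
 */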

static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}
	return queue_index;
}

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;
				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
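
/*
 * XPS maps are populated from user space. As a sketch (device, queue and
 * mask are illustrative), pinning transmit queue 0 of eth0 to CPUs 0-3:
 *
 *	echo 0f > /sys/class/net/eth0/queues/tx-0/xps_cpus
 *
 * With that in place, get_xps_queue() running on CPUs 0-3 finds tx-0 in
 * the per-CPU map instead of falling back to skb_tx_hash().
 */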

u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk) {
			struct dst_entry *dst =
				    rcu_dereference_check(sk->sk_dst_cache, 1);

			/* Cache the freshly chosen queue, not the stale one */
			if (dst && skb_dst(skb) == dst)
				sk_tx_queue_set(sk, new_index);

		}

		queue_index = new_index;
	}

	return queue_index;
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb);
		else
			queue_index = __netdev_pick_tx(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
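
/*
 * Drivers with their own steering policy can override the choice via
 * ndo_select_queue. A minimal sketch, assuming a hypothetical driver
 * that reserves queue 0 for control traffic:
 *
 *	static u16 foo_select_queue(struct net_device *dev,
 *				    struct sk_buff *skb)
 *	{
 *		if (skb->priority == TC_PRIO_CONTROL)
 *			return 0;
 *		return __netdev_pick_tx(dev, skb);
 *	}
 *
 * Whatever the hook returns is still clamped by dev_cap_txqueue(), so an
 * out-of-range index degrades to queue 0 with a rate-limited warning.
 */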

static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get a more precise estimation of bytes sent on wire,
	 * we add the header size of all segments to pkt_len.
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len = skb_transport_offset(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);
		qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len;
	}
}
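
/*
 * Worked example for the GSO adjustment above (numbers illustrative):
 * a TCP skb with 64KB of payload and gso_size = 1448 leaves as
 * gso_segs = 46 wire segments. With hdr_len = 54 (14 Ethernet + 20 IPv4
 * + 20 TCP, measured from skb->data to the end of the TCP header), the
 * headers of segments 2..46 add 45 * 54 = 2430 bytes to pkt_len, so the
 * qdisc accounts for what actually reaches the wire.
 */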

static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_pkt_len_init(skb);
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get the qdisc main lock.
	 * This permits the __QDISC_STATE_RUNNING owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif
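
/*
 * skb_update_prio() only does something once user space fills a priomap.
 * A sketch of the usual net_prio cgroup setup (paths assume a mounted
 * net_prio hierarchy; names and values are illustrative):
 *
 *	mkdir /sys/fs/cgroup/net_prio/bulk
 *	echo "eth0 2" > /sys/fs/cgroup/net_prio/bulk/net_prio.ifpriomap
 *
 * Sockets of tasks in that cgroup then carry an sk_cgrp_prioidx whose
 * eth0 priomap entry is 2, which ends up in skb->priority here.
 */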

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

/**
 * dev_loopback_xmit - loop back @skb
 * @skb: buffer to transmit
 */
int dev_loopback_xmit(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
2876
2877/**
Dave Jonesd29f7492008-07-22 14:09:06 -07002878 * dev_queue_xmit - transmit a buffer
2879 * @skb: buffer to transmit
2880 *
2881 * Queue a buffer for transmission to a network device. The caller must
2882 * have set the device and priority and built the buffer before calling
2883 * this function. The function can be called from an interrupt.
2884 *
2885 * A negative errno code is returned on a failure. A success does not
2886 * guarantee the frame will be transmitted as it may be dropped due
2887 * to congestion or traffic shaping.
2888 *
2889 * -----------------------------------------------------------------------------------
2890 * I notice this method can also return errors from the queue disciplines,
2891 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2892 * be positive.
2893 *
2894 * Regardless of the return value, the skb is consumed, so it is currently
2895 * difficult to retry a send to this method. (You can bump the ref count
2896 * before sending to hold a reference for retry if you are careful.)
2897 *
2898 * When calling this method, interrupts MUST be enabled. This is because
2899 * the BH enable code must have IRQs enabled so that it will not deadlock.
2900 * --BLG
2901 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902int dev_queue_xmit(struct sk_buff *skb)
2903{
2904 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002905 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 struct Qdisc *q;
2907 int rc = -ENOMEM;
2908
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002909 /* Disable soft irqs for various locks below. Also
2910 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002912 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913
Neil Horman5bc14212011-11-22 05:10:51 +00002914 skb_update_prio(skb);
2915
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002916 txq = netdev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002917 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002918
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002920 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002922 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002924 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002925 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926 }
2927
2928 /* The device has no queue. Common case for software devices:
2929 loopback, all the sorts of tunnels...
2930
Herbert Xu932ff272006-06-09 12:20:56 -07002931 Really, it is unlikely that netif_tx_lock protection is necessary
2932 here. (f.e. loopback and IP tunnels are clean ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 counters.)
2934 However, it is possible, that they rely on protection
2935 made by us here.
2936
2937 Check this and shot the lock. It is not prone from deadlocks.
2938 Either shot noqueue qdisc, it is even simpler 8)
2939 */
2940 if (dev->flags & IFF_UP) {
2941 int cpu = smp_processor_id(); /* ok because BHs are off */
2942
David S. Millerc773e842008-07-08 23:13:53 -07002943 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944
Eric Dumazet745e20f2010-09-29 13:23:09 -07002945 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2946 goto recursion_alert;
2947
David S. Millerc773e842008-07-08 23:13:53 -07002948 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949
Tom Herbert734664982011-11-28 16:32:44 +00002950 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002951 __this_cpu_inc(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002952 rc = dev_hard_start_xmit(skb, dev, txq);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002953 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002954 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002955 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956 goto out;
2957 }
2958 }
David S. Millerc773e842008-07-08 23:13:53 -07002959 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00002960 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2961 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962 } else {
2963 /* Recursion is detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07002964 * unfortunately
2965 */
2966recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00002967 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2968 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969 }
2970 }
2971
2972 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002973 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 kfree_skb(skb);
2976 return rc;
2977out:
Herbert Xud4828d82006-06-22 02:28:18 -07002978 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 return rc;
2980}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002981EXPORT_SYMBOL(dev_queue_xmit);
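
/*
 * A sketch of the calling convention for code that builds its own frames
 * (error handling trimmed; remember the skb is consumed either way):
 *
 *	struct sk_buff *skb = alloc_skb(len + LL_RESERVED_SPACE(dev),
 *					GFP_ATOMIC);
 *
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	... build headers and payload, set skb->protocol ...
 *	skb->dev = dev;
 *	rc = dev_queue_xmit(skb);
 *
 * As the comment above warns, rc < 0 is an errno, but positive values
 * such as NET_XMIT_DROP can come back from the qdisc as well.
 */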


/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_rxhash = 1;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}

	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
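
/*
 * The canonical ordering above is what makes the hash direction-agnostic.
 * Illustrative example: for a flow 10.0.0.1:5000 -> 10.0.0.2:80 and its
 * reply 10.0.0.2:80 -> 10.0.0.1:5000, both directions are swapped into
 * the same (src, dst, ports) tuple before jhash_3words(), so forward and
 * reverse packets share one rxhash and hence one RPS/RFS flow entry.
 */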

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb->rxhash & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		if (map->len == 1 &&
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	if (!skb_get_rxhash(skb))
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);
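
/*
 * Expected driver-side usage, as a sketch; the work function and filter
 * bookkeeping below are hypothetical, not a real driver API:
 *
 *	static void foo_filter_expiry_work(struct foo_priv *priv)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < priv->n_filters; i++)
 *			if (rps_may_expire_flow(priv->netdev,
 *						priv->filters[i].rxq_index,
 *						priv->filters[i].flow_id, i))
 *				foo_remove_hw_filter(priv, i);
 *	}
 */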

#endif /* CONFIG_RFS_ACCEL */

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */
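
/*
 * RPS/RFS are armed from user space. As a sketch (device names and mask
 * values illustrative), spreading eth0's rx-0 across CPUs 0-3 and sizing
 * the flow tables looks like:
 *
 *	echo 0f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *	echo 4096 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *	echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
 *
 * Until a non-empty map flips the rps_needed static key, netif_rx() and
 * netif_receive_skb() below skip get_rps_cpu() entirely.
 */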

/*
 * Check if this softnet_data structure is another CPU's one.
 * If yes, queue it to our IPI list and return 1.
 * If no, return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for the backlog device.
		 * We can use a non-atomic operation since we own the queue
		 * lock.
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	int ret;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
EXPORT_SYMBOL(netif_rx);
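
/*
 * Typical usage, as a sketch (foo_* names hypothetical): a non-NAPI
 * driver hands frames over straight from its interrupt handler.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb = foo_rx_frame(dev);
 *
 *		if (skb) {
 *			skb->protocol = eth_type_trans(skb, dev);
 *			netif_rx(skb);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 * Process-context callers should use netif_rx_ni() below so a raised
 * NET_RX softirq is not left unserviced.
 */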

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise some useless instructions
 * (a compare and 2 extra stores) are executed right now if we don't
 * have it on but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		net_warn_ratelimited("Redir loop detected, dropping packet (%d->%d)\n",
				     skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
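
/*
 * rx_handler is how bridge/bonding-style masters hijack ingress traffic
 * on their ports. A minimal sketch of a handler (foo_* names are
 * hypothetical):
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct foo_port *port =
 *			rcu_dereference(skb->dev->rx_handler_data);
 *
 *		if (foo_steal_frame(port, skb))
 *			return RX_HANDLER_CONSUMED;
 *
 *		skb->dev = port->master_dev;
 *		*pskb = skb;
 *		return RX_HANDLER_ANOTHER;
 *	}
 *
 * RX_HANDLER_ANOTHER makes __netif_receive_skb() restart delivery with
 * the new skb->dev, via the another_round label below.
 */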

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __constant_htons(ETH_P_ARP):
	case __constant_htons(ETH_P_IP):
	case __constant_htons(ETH_P_IPV6):
	case __constant_htons(ETH_P_8021Q):
		return true;
	default:
		return false;
	}
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *null_or_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;
	unsigned long pflags = current->flags;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	/*
	 * PFMEMALLOC skbs are special, they should
	 * - be delivered to SOCK_MEMALLOC sockets only
	 * - stay away from userspace
	 * - have bounded memory usage
	 *
	 * Use PF_MEMALLOC as this saves us from propagating the allocation
	 * context down to all allocation sites.
	 */
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		current->flags |= PF_MEMALLOC;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		goto out;

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto unlock;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

skip_taps:
#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto unlock;
ncls:
#endif

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)
				&& !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto unlock;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			goto unlock;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
			/* fall through */
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (vlan_tx_nonzero_tag_present(skb))
		skb->pkt_type = PACKET_OTHERHOST;

	/* deliver only exact match when indicated */
	null_or_dev = deliver_exact ? skb->dev : NULL;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

unlock:
	rcu_read_unlock();
out:
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
	return ret;
}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}
EXPORT_SYMBOL(netif_receive_skb);

/* Network device is going away, flush any packets still pending.
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}
3822
/* napi->gro_list contains packets ordered by age, with the youngest
 * packets at the head of the list.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
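
/* The two callers in this file illustrate both flush modes:
 * napi_complete() below uses napi_gro_flush(n, false) to complete every
 * held skb, while net_rx_action() uses napi_gro_flush(n, HZ >= 1000) so
 * that, when jiffies are fine-grained enough, skbs merged during the
 * current jiffy stay held a little longer.
 */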

static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_gro_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_gro_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
		NAPI_GRO_CB(p)->flush = 0;
	}
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	int mac_len;
	enum gro_result ret;

	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb))
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);

		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
			skb_frag_unref(skb, 0);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}


static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			kmem_cache_free(skbuff_head_cache, skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}

static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb->mac_header == skb->tail &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
	}
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
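
/* Example (sketch, hypothetical names): the usual NAPI receive path lets
 * GRO aggregate segments before the stack sees them:
 *
 *	skb->protocol = eth_type_trans(skb, priv->netdev);
 *	napi_gro_receive(&priv->napi, skb);
 *
 * Whatever gro_result_t comes back, the skb now belongs to the stack and
 * must not be touched by the driver again.
 */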

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (ret == GRO_HELD)
			skb_gro_pull(skb, -ETH_HLEN);
		else if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}

static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;
	unsigned int hlen;
	unsigned int off;

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*eth);
	eth = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		eth = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			skb = NULL;
			goto out;
		}
	}

	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

out:
	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
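
/* Example (sketch, hypothetical names): drivers that receive directly
 * into pages use the napi_get_frags()/napi_gro_frags() pair instead of
 * building a linear skb themselves:
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += len;
 *	napi_gro_frags(napi);
 */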

/*
 * net_rps_action sends any pending IPIs for RPS.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPIs to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				__smp_call_function_single(remsd->cpu,
							   &remsd->csd, 0);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

#ifdef CONFIG_RPS
	/* Check if we have pending IPIs; it's better to send them now
	 * rather than wait for net_rx_action() to end.
	 */
	if (sd->rps_ipi_list) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}
#endif
	napi->weight = weight_p;
	local_irq_disable();
	while (work < quota) {
		struct sk_buff *skb;
		unsigned int qlen;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		qlen = skb_queue_len(&sd->input_pkt_queue);
		if (qlen)
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);

		if (qlen < quota - work) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on the backlog, so we can use a plain write instead
			 * of clear_bit() and we don't need an smp_mb() memory
			 * barrier.
			 */
			list_del(&napi->poll_list);
			napi->state = 0;

			quota = work + qlen;
		}
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(&__get_cpu_var(softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
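
/* Example (sketch, hypothetical names): the usual pattern in a device
 * interrupt handler is to mask the device's RX interrupt and defer the
 * rest of the work to NAPI:
 *
 *	static irqreturn_t my_isr(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			my_disable_rx_irq(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */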

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * Don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu.
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n, false);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);
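
/* Example (sketch): a poll routine calls napi_complete() only when it
 * did less work than its budget, then re-enables its interrupt source
 * (my_enable_rx_irq() is hypothetical):
 *
 *	if (work < budget) {
 *		napi_complete(napi);
 *		my_enable_rx_irq(priv);
 *	}
 *	return work;
 */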

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
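
/* Example (sketch): drivers register their poll routine at probe time,
 * conventionally with a weight of 64:
 *
 *	netif_napi_add(netdev, &priv->napi, my_poll, 64);
 */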

void netif_napi_del(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		kfree_skb(skb);
	}

	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&sd->poll_list)) {
		struct napi_struct *n;
		int work, weight;

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows an average
		 * latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi(). Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call. Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else {
				if (n->gro_list) {
					/* flush too old packets
					 * If HZ < 1000, flush all packets.
					 */
					local_irq_enable();
					napi_gro_flush(n, HZ >= 1000);
					local_irq_disable();
				}
				list_move_tail(&n->poll_list, &sd->poll_list);
			}
		}

		netpoll_poll_unlock(have);
	}
out:
	net_rps_action_and_irq_enable(sd);

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	sd->time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

static gifconf_func_t *gifconf_list[NPROTO];

/**
 * register_gifconf - register a SIOCGIF handler
 * @family: Address family
 * @gifconf: Function handler
 *
 * Register protocol dependent address dumping routines. The handler
 * that is passed must not be freed or reused until it has been replaced
 * by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}
EXPORT_SYMBOL(register_gifconf);
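
/* Example (sketch): an address family registers its dumper once at boot;
 * IPv4, for instance, is expected to do something along the lines of:
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 */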


/*
 * Map an interface index to its name (SIOCGIFNAME)
 */

/*
 * We need this ioctl for efficient implementation of the
 * if_indextoname() function required by the IPv6 API.  Without
 * it, we would have to search all the interfaces to find a
 * match.  --pb
 */

static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;
	unsigned int seq;

	/*
	 * Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

retry:
	seq = read_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq))
		goto retry;

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}

/*
 * Perform a SIOCGIFCONF call. This structure will change
 * size eventually, and there is nothing I can do about it.
 * Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(struct net *net, char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 * Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 * Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
	for_each_netdev(net, dev) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 * All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 * Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}

#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct net_device *dev;
	struct hlist_node *p;
	struct hlist_head *h;
	unsigned int count = 0, offset = get_offset(*pos);

	h = &net->dev_name_head[get_bucket(*pos)];
	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
		if (++count == offset)
			return dev;
	}

	return NULL;
}

static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net_device *dev;
	unsigned int bucket;

	do {
		dev = dev_from_same_bucket(seq, pos);
		if (dev)
			return dev;

		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < NETDEV_HASHENTRIES);

	return NULL;
}

/*
 * This is invoked by the /proc filesystem handler to display a device
 * in detail.
 */
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
		return NULL;

	return dev_from_bucket(seq, pos);
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return dev_from_bucket(seq, pos);
}

void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 * Called from the PROCfs module. This now uses the new arbitrary sized
 * /proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   sd->cpu_collision, sd->received_rps);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &dev_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = dev_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static const struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = softnet_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %pF\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int ptype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ptype_seq_ops,
			sizeof(struct seq_net_private));
}

static const struct file_operations ptype_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = ptype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};


static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	proc_net_remove(net, "ptype");
out_softnet:
	proc_net_remove(net, "softnet_stat");
out_dev:
	proc_net_remove(net, "dev");
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	proc_net_remove(net, "ptype");
	proc_net_remove(net, "softnet_stat");
	proc_net_remove(net, "dev");
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int __init dev_proc_init(void)
{
	return register_pernet_subsys(&dev_proc_ops);
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */


struct netdev_upper {
	struct net_device *dev;
	bool master;
	struct list_head list;
	struct rcu_head rcu;
	struct list_head search_list;
};

static void __append_search_uppers(struct list_head *search_list,
				   struct net_device *dev)
{
	struct netdev_upper *upper;

	list_for_each_entry(upper, &dev->upper_dev_list, list) {
		/* check if this upper is not already in search list */
		if (list_empty(&upper->search_list))
			list_add_tail(&upper->search_list, search_list);
	}
}

static bool __netdev_search_upper_dev(struct net_device *dev,
				      struct net_device *upper_dev)
{
	LIST_HEAD(search_list);
	struct netdev_upper *upper;
	struct netdev_upper *tmp;
	bool ret = false;

	__append_search_uppers(&search_list, dev);
	list_for_each_entry(upper, &search_list, search_list) {
		if (upper->dev == upper_dev) {
			ret = true;
			break;
		}
		__append_search_uppers(&search_list, upper->dev);
	}
	list_for_each_entry_safe(upper, tmp, &search_list, search_list)
		INIT_LIST_HEAD(&upper->search_list);
	return ret;
}

static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
						struct net_device *upper_dev)
{
	struct netdev_upper *upper;

	list_for_each_entry(upper, &dev->upper_dev_list, list) {
		if (upper->dev == upper_dev)
			return upper;
	}
	return NULL;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return __netdev_find_upper(dev, upper_dev);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->upper_dev_list);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_upper *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->upper_dev_list))
		return NULL;

	upper = list_first_entry(&dev->upper_dev_list,
				 struct netdev_upper, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_upper *upper;

	upper = list_first_or_null_rcu(&dev->upper_dev_list,
				       struct netdev_upper, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master)
{
	struct netdev_upper *upper;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (__netdev_search_upper_dev(upper_dev, dev))
		return -EBUSY;

	if (__netdev_find_upper(dev, upper_dev))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	upper = kmalloc(sizeof(*upper), GFP_KERNEL);
	if (!upper)
		return -ENOMEM;

	upper->dev = upper_dev;
	upper->master = master;
	INIT_LIST_HEAD(&upper->search_list);

	/* Ensure that master upper link is always the first item in list. */
	if (master)
		list_add_rcu(&upper->list, &dev->upper_dev_list);
	else
		list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
	dev_hold(upper_dev);

	return 0;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, true);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
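
/* Example (sketch): a bonding-style master takes ownership of a slave
 * with the master variant, while a mere upper/lower relationship (e.g. a
 * vlan on top of a real device) uses netdev_upper_dev_link():
 *
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev);
 *	if (err)
 *		return err;
 */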

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to remove a link to
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_upper *upper;

	ASSERT_RTNL();

	upper = __netdev_find_upper(dev, upper_dev);
	if (!upper)
		return;
	list_del_rcu(&upper->list);
	dev_put(upper_dev);
	kfree_rcu(upper, rcu);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
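
/* Example (sketch): a packet tap bumps the promiscuity count under RTNL
 * and must drop it symmetrically when done:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */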
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151
5152/**
5153 * dev_set_allmulti - update allmulti count on a device
5154 * @dev: device
5155 * @inc: modifier
5156 *
5157 * Add or remove reception of all multicast frames to a device. While the
5158 * count in the device remains above zero the interface remains listening
5159 * to all interfaces. Once it hits zero the device reverts back to normal
5160 * filtering operation. A negative @inc value is used to drop the counter
5161 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07005162 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005163 */
5164
Wang Chendad9b332008-06-18 01:48:28 -07005165int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005166{
Eric Dumazetb536db92011-11-30 21:42:26 +00005167 unsigned int old_flags = dev->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168
Patrick McHardy24023452007-07-14 18:51:31 -07005169 ASSERT_RTNL();
5170
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07005172 dev->allmulti += inc;
5173 if (dev->allmulti == 0) {
5174 /*
5175 * Avoid overflow.
5176 * If inc causes overflow, untouch allmulti and return error.
5177 */
5178 if (inc < 0)
5179 dev->flags &= ~IFF_ALLMULTI;
5180 else {
5181 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005182 pr_warn("%s: allmulti counter would overflow; allmulti left unchanged. The allmulti feature of this device may be broken.\n",
5183 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005184 return -EOVERFLOW;
5185 }
5186 }
Patrick McHardy24023452007-07-14 18:51:31 -07005187 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005188 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07005189 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07005190 }
Wang Chendad9b332008-06-18 01:48:28 -07005191 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005192}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005193EXPORT_SYMBOL(dev_set_allmulti);
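The allmulti counter follows the same pairing discipline. An aggregating driver, for instance, would propagate a master's allmulti state to each newly attached lower device and must be prepared for -EOVERFLOW. A hedged sketch, with the myagg_* names purely hypothetical:

/* Propagate the master's allmulti state to a new slave. Called
 * under RTNL, which dev_set_allmulti() asserts.
 */
static int myagg_sync_allmulti(struct net_device *master,
			       struct net_device *slave)
{
	if (!(master->flags & IFF_ALLMULTI))
		return 0;
	return dev_set_allmulti(slave, 1);	/* 0 or -EOVERFLOW */
}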
Patrick McHardy4417da62007-06-27 01:28:10 -07005194
5195/*
5196 * Upload unicast and multicast address lists to device and
5197 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08005198 * filtering, it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07005199 * are present.
5200 */
5201void __dev_set_rx_mode(struct net_device *dev)
5202{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005203 const struct net_device_ops *ops = dev->netdev_ops;
5204
Patrick McHardy4417da62007-06-27 01:28:10 -07005205 /* dev_open will call this function so the list will stay sane. */
5206 if (!(dev->flags&IFF_UP))
5207 return;
5208
5209 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09005210 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07005211
Jiri Pirko01789342011-08-16 06:29:00 +00005212 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005213 /* Unicast address changes may only happen under the rtnl,
5214 * therefore calling __dev_set_promiscuity here is safe.
5215 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005216 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005217 __dev_set_promiscuity(dev, 1);
Joe Perches2d348d12011-07-25 16:17:35 -07005218 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005219 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005220 __dev_set_promiscuity(dev, -1);
Joe Perches2d348d12011-07-25 16:17:35 -07005221 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07005222 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005223 }
Jiri Pirko01789342011-08-16 06:29:00 +00005224
5225 if (ops->ndo_set_rx_mode)
5226 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005227}
5228
5229void dev_set_rx_mode(struct net_device *dev)
5230{
David S. Millerb9e40852008-07-15 00:15:08 -07005231 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005232 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07005233 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234}
5235
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005236/**
5237 * dev_get_flags - get flags reported to userspace
5238 * @dev: device
5239 *
5240 * Get the combination of flag bits exported through APIs to userspace.
5241 */
Eric Dumazet95c96172012-04-15 05:58:06 +00005242unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005243{
Eric Dumazet95c96172012-04-15 05:58:06 +00005244 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245
5246 flags = (dev->flags & ~(IFF_PROMISC |
5247 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08005248 IFF_RUNNING |
5249 IFF_LOWER_UP |
5250 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07005251 (dev->gflags & (IFF_PROMISC |
5252 IFF_ALLMULTI));
5253
Stefan Rompfb00055a2006-03-20 17:09:11 -08005254 if (netif_running(dev)) {
5255 if (netif_oper_up(dev))
5256 flags |= IFF_RUNNING;
5257 if (netif_carrier_ok(dev))
5258 flags |= IFF_LOWER_UP;
5259 if (netif_dormant(dev))
5260 flags |= IFF_DORMANT;
5261 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005262
5263 return flags;
5264}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005265EXPORT_SYMBOL(dev_get_flags);
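Since IFF_RUNNING, IFF_LOWER_UP and IFF_DORMANT are synthesized here rather than stored in dev->flags, code wanting the userspace view must go through this helper. A small illustrative check; mydrv_link_usable is a hypothetical name:

#include <linux/netdevice.h>

/* True if userspace would see the interface as administratively up
 * and operationally running.
 */
static bool mydrv_link_usable(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}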
Linus Torvalds1da177e2005-04-16 15:20:36 -07005266
Patrick McHardybd380812010-02-26 06:34:53 +00005267int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005268{
Eric Dumazetb536db92011-11-30 21:42:26 +00005269 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005270 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271
Patrick McHardy24023452007-07-14 18:51:31 -07005272 ASSERT_RTNL();
5273
Linus Torvalds1da177e2005-04-16 15:20:36 -07005274 /*
5275 * Set the flags on our device.
5276 */
5277
5278 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5279 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5280 IFF_AUTOMEDIA)) |
5281 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5282 IFF_ALLMULTI));
5283
5284 /*
5285 * Load in the correct multicast list now the flags have changed.
5286 */
5287
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005288 if ((old_flags ^ flags) & IFF_MULTICAST)
5289 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07005290
Patrick McHardy4417da62007-06-27 01:28:10 -07005291 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005292
5293 /*
5294 * Have we downed the interface? We handle IFF_UP ourselves
5295 * according to user attempts to set it, rather than blindly
5296 * setting it.
5297 */
5298
5299 ret = 0;
5300 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00005301 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302
5303 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07005304 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005305 }
5306
Linus Torvalds1da177e2005-04-16 15:20:36 -07005307 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005308 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5309
Linus Torvalds1da177e2005-04-16 15:20:36 -07005310 dev->gflags ^= IFF_PROMISC;
5311 dev_set_promiscuity(dev, inc);
5312 }
5313
5314 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5315 is important. Some (broken) drivers set IFF_PROMISC when
5316 IFF_ALLMULTI is requested, without asking us and without reporting.
5317 */
5318 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005319 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5320
Linus Torvalds1da177e2005-04-16 15:20:36 -07005321 dev->gflags ^= IFF_ALLMULTI;
5322 dev_set_allmulti(dev, inc);
5323 }
5324
Patrick McHardybd380812010-02-26 06:34:53 +00005325 return ret;
5326}
5327
5328void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
5329{
5330 unsigned int changes = dev->flags ^ old_flags;
5331
5332 if (changes & IFF_UP) {
5333 if (dev->flags & IFF_UP)
5334 call_netdevice_notifiers(NETDEV_UP, dev);
5335 else
5336 call_netdevice_notifiers(NETDEV_DOWN, dev);
5337 }
5338
5339 if (dev->flags & IFF_UP &&
5340 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
5341 call_netdevice_notifiers(NETDEV_CHANGE, dev);
5342}
5343
5344/**
5345 * dev_change_flags - change device settings
5346 * @dev: device
5347 * @flags: device state flags
5348 *
5349 * Change settings on device based state flags. The flags are
5350 * in the userspace exported format.
5351 */
Eric Dumazetb536db92011-11-30 21:42:26 +00005352int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00005353{
Eric Dumazetb536db92011-11-30 21:42:26 +00005354 int ret;
5355 unsigned int changes, old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005356
5357 ret = __dev_change_flags(dev, flags);
5358 if (ret < 0)
5359 return ret;
5360
5361 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07005362 if (changes)
5363 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005364
Patrick McHardybd380812010-02-26 06:34:53 +00005365 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005366 return ret;
5367}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005368EXPORT_SYMBOL(dev_change_flags);
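A common in-kernel use is toggling IFF_UP while preserving the other flag bits. A minimal sketch under the usual RTNL rule; mymod_if_up is a hypothetical name:

static int mymod_if_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	/* Preserve the existing flags; on success dev_change_flags()
	 * emits RTM_NEWLINK and the NETDEV_UP notification.
	 */
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}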
Linus Torvalds1da177e2005-04-16 15:20:36 -07005369
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005370/**
5371 * dev_set_mtu - Change maximum transfer unit
5372 * @dev: device
5373 * @new_mtu: new transfer unit
5374 *
5375 * Change the maximum transfer size of the network device.
5376 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005377int dev_set_mtu(struct net_device *dev, int new_mtu)
5378{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005379 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005380 int err;
5381
5382 if (new_mtu == dev->mtu)
5383 return 0;
5384
5385 /* MTU must not be negative. */
5386 if (new_mtu < 0)
5387 return -EINVAL;
5388
5389 if (!netif_device_present(dev))
5390 return -ENODEV;
5391
5392 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005393 if (ops->ndo_change_mtu)
5394 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005395 else
5396 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005397
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00005398 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005399 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005400 return err;
5401}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005402EXPORT_SYMBOL(dev_set_mtu);
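Stacked devices typically call this to track their lower device. A hedged sketch that clamps an upper device's MTU, for example from a NETDEV_CHANGEMTU notifier; myvdev_follow_mtu is a hypothetical name:

static int myvdev_follow_mtu(struct net_device *vdev,
			     struct net_device *lower)
{
	if (vdev->mtu <= lower->mtu)
		return 0;
	return dev_set_mtu(vdev, lower->mtu);	/* notifies on success */
}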
Linus Torvalds1da177e2005-04-16 15:20:36 -07005403
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005404/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005405 * dev_set_group - Change group this device belongs to
5406 * @dev: device
5407 * @new_group: group this device should belong to
5408 */
5409void dev_set_group(struct net_device *dev, int new_group)
5410{
5411 dev->group = new_group;
5412}
5413EXPORT_SYMBOL(dev_set_group);
5414
5415/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005416 * dev_set_mac_address - Change Media Access Control Address
5417 * @dev: device
5418 * @sa: new address
5419 *
5420 * Change the hardware (MAC) address of the device
5421 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5423{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005424 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005425 int err;
5426
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005427 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005428 return -EOPNOTSUPP;
5429 if (sa->sa_family != dev->type)
5430 return -EINVAL;
5431 if (!netif_device_present(dev))
5432 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005433 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00005434 if (err)
5435 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00005436 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00005437 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005438 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00005439 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005441EXPORT_SYMBOL(dev_set_mac_address);
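Callers build a struct sockaddr whose sa_family matches dev->type. A minimal sketch for an Ethernet device, with RTNL held; mymod_set_mac is a hypothetical name:

#include <linux/etherdevice.h>

static int mymod_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;		/* must match, else -EINVAL */
	memcpy(sa.sa_data, mac, ETH_ALEN);	/* 6 bytes for ARPHRD_ETHER */
	return dev_set_mac_address(dev, &sa);
}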
Linus Torvalds1da177e2005-04-16 15:20:36 -07005442
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005443/**
5444 * dev_change_carrier - Change device carrier
5445 * @dev: device
5446 * @new_carrier: new value
5447 *
5448 * Change device carrier
5449 */
5450int dev_change_carrier(struct net_device *dev, bool new_carrier)
5451{
5452 const struct net_device_ops *ops = dev->netdev_ops;
5453
5454 if (!ops->ndo_change_carrier)
5455 return -EOPNOTSUPP;
5456 if (!netif_device_present(dev))
5457 return -ENODEV;
5458 return ops->ndo_change_carrier(dev, new_carrier);
5459}
5460EXPORT_SYMBOL(dev_change_carrier);
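A software device's ndo_change_carrier can simply mirror the request into the carrier state, roughly what the dummy driver does. Sketch only; mysoftdev_change_carrier is a hypothetical name:

static int mysoftdev_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}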
5461
Linus Torvalds1da177e2005-04-16 15:20:36 -07005462/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00005463 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07005464 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07005465static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005466{
5467 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00005468 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005469
5470 if (!dev)
5471 return -ENODEV;
5472
5473 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005474 case SIOCGIFFLAGS: /* Get interface flags */
5475 ifr->ifr_flags = (short) dev_get_flags(dev);
5476 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005477
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005478 case SIOCGIFMETRIC: /* Get the metric on the interface
5479 (currently unused) */
5480 ifr->ifr_metric = 0;
5481 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005482
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005483 case SIOCGIFMTU: /* Get the MTU of a device */
5484 ifr->ifr_mtu = dev->mtu;
5485 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005486
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005487 case SIOCGIFHWADDR:
5488 if (!dev->addr_len)
5489 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
5490 else
5491 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
5492 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5493 ifr->ifr_hwaddr.sa_family = dev->type;
5494 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005495
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005496 case SIOCGIFSLAVE:
5497 err = -EINVAL;
5498 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005499
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005500 case SIOCGIFMAP:
5501 ifr->ifr_map.mem_start = dev->mem_start;
5502 ifr->ifr_map.mem_end = dev->mem_end;
5503 ifr->ifr_map.base_addr = dev->base_addr;
5504 ifr->ifr_map.irq = dev->irq;
5505 ifr->ifr_map.dma = dev->dma;
5506 ifr->ifr_map.port = dev->if_port;
5507 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005508
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005509 case SIOCGIFINDEX:
5510 ifr->ifr_ifindex = dev->ifindex;
5511 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005512
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005513 case SIOCGIFTXQLEN:
5514 ifr->ifr_qlen = dev->tx_queue_len;
5515 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005516
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005517 default:
5518 /* dev_ioctl() should ensure this case
5519 * is never reached
5520 */
5521 WARN_ON(1);
Lifeng Sun41c31f32011-04-27 22:04:51 +00005522 err = -ENOTTY;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005523 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005524
5525 }
5526 return err;
5527}
5528
5529/*
5530 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
5531 */
5532static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
5533{
5534 int err;
5535 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08005536 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005537
5538 if (!dev)
5539 return -ENODEV;
5540
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08005541 ops = dev->netdev_ops;
5542
Jeff Garzik14e3e072007-10-08 00:06:32 -07005543 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005544 case SIOCSIFFLAGS: /* Set interface flags */
5545 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07005546
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005547 case SIOCSIFMETRIC: /* Set the metric on the interface
5548 (currently unused) */
5549 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005550
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005551 case SIOCSIFMTU: /* Set the MTU of a device */
5552 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07005553
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005554 case SIOCSIFHWADDR:
5555 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005556
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005557 case SIOCSIFHWBROADCAST:
5558 if (ifr->ifr_hwaddr.sa_family != dev->type)
5559 return -EINVAL;
5560 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
5561 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5562 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5563 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005564
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005565 case SIOCSIFMAP:
5566 if (ops->ndo_set_config) {
5567 if (!netif_device_present(dev))
5568 return -ENODEV;
5569 return ops->ndo_set_config(dev, &ifr->ifr_map);
5570 }
5571 return -EOPNOTSUPP;
5572
5573 case SIOCADDMULTI:
Jiri Pirkob81693d2011-08-16 06:29:02 +00005574 if (!ops->ndo_set_rx_mode ||
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005575 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5576 return -EINVAL;
5577 if (!netif_device_present(dev))
5578 return -ENODEV;
Jiri Pirko22bedad32010-04-01 21:22:57 +00005579 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005580
5581 case SIOCDELMULTI:
Jiri Pirkob81693d2011-08-16 06:29:02 +00005582 if (!ops->ndo_set_rx_mode ||
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005583 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5584 return -EINVAL;
5585 if (!netif_device_present(dev))
5586 return -ENODEV;
Jiri Pirko22bedad32010-04-01 21:22:57 +00005587 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005588
5589 case SIOCSIFTXQLEN:
5590 if (ifr->ifr_qlen < 0)
5591 return -EINVAL;
5592 dev->tx_queue_len = ifr->ifr_qlen;
5593 return 0;
5594
5595 case SIOCSIFNAME:
5596 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5597 return dev_change_name(dev, ifr->ifr_newname);
5598
Richard Cochran4dc360c2011-10-19 17:00:35 -04005599 case SIOCSHWTSTAMP:
5600 err = net_hwtstamp_validate(ifr);
5601 if (err)
5602 return err;
5603 /* fall through */
5604
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005605 /*
5606 * Unknown or private ioctl
5607 */
5608 default:
5609 if ((cmd >= SIOCDEVPRIVATE &&
5610 cmd <= SIOCDEVPRIVATE + 15) ||
5611 cmd == SIOCBONDENSLAVE ||
5612 cmd == SIOCBONDRELEASE ||
5613 cmd == SIOCBONDSETHWADDR ||
5614 cmd == SIOCBONDSLAVEINFOQUERY ||
5615 cmd == SIOCBONDINFOQUERY ||
5616 cmd == SIOCBONDCHANGEACTIVE ||
5617 cmd == SIOCGMIIPHY ||
5618 cmd == SIOCGMIIREG ||
5619 cmd == SIOCSMIIREG ||
5620 cmd == SIOCBRADDIF ||
5621 cmd == SIOCBRDELIF ||
5622 cmd == SIOCSHWTSTAMP ||
5623 cmd == SIOCWANDEV) {
5624 err = -EOPNOTSUPP;
5625 if (ops->ndo_do_ioctl) {
5626 if (netif_device_present(dev))
5627 err = ops->ndo_do_ioctl(dev, ifr, cmd);
5628 else
5629 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005630 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005631 } else
5632 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005633
5634 }
5635 return err;
5636}
5637
5638/*
5639 * This function handles all "interface"-type I/O control requests. The actual
5640 * 'doing' part of this is dev_ifsioc above.
5641 */
5642
5643/**
5644 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005645 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005646 * @cmd: command to issue
5647 * @arg: pointer to a struct ifreq in user space
5648 *
5649 * Issue ioctl functions to devices. This is normally called by the
5650 * user space syscall interfaces but can sometimes be useful for
5651 * other purposes. The return value is the return from the syscall if
5652 * positive or a negative errno code on error.
5653 */
5654
Eric W. Biederman881d9662007-09-17 11:56:21 -07005655int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005656{
5657 struct ifreq ifr;
5658 int ret;
5659 char *colon;
5660
5661 /* One special case: SIOCGIFCONF takes ifconf argument
5662 and requires shared lock, because it sleeps writing
5663 to user space.
5664 */
5665
5666 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005667 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005668 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005669 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005670 return ret;
5671 }
5672 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005673 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005674
5675 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5676 return -EFAULT;
5677
5678 ifr.ifr_name[IFNAMSIZ-1] = 0;
5679
5680 colon = strchr(ifr.ifr_name, ':');
5681 if (colon)
5682 *colon = 0;
5683
5684 /*
5685 * See which interface the caller is talking about.
5686 */
5687
5688 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005689 /*
5690 * These ioctl calls:
5691 * - can be done by all.
5692 * - atomic and do not require locking.
5693 * - return a value
5694 */
5695 case SIOCGIFFLAGS:
5696 case SIOCGIFMETRIC:
5697 case SIOCGIFMTU:
5698 case SIOCGIFHWADDR:
5699 case SIOCGIFSLAVE:
5700 case SIOCGIFMAP:
5701 case SIOCGIFINDEX:
5702 case SIOCGIFTXQLEN:
5703 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00005704 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005705 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00005706 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005707 if (!ret) {
5708 if (colon)
5709 *colon = ':';
5710 if (copy_to_user(arg, &ifr,
5711 sizeof(struct ifreq)))
5712 ret = -EFAULT;
5713 }
5714 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005715
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005716 case SIOCETHTOOL:
5717 dev_load(net, ifr.ifr_name);
5718 rtnl_lock();
5719 ret = dev_ethtool(net, &ifr);
5720 rtnl_unlock();
5721 if (!ret) {
5722 if (colon)
5723 *colon = ':';
5724 if (copy_to_user(arg, &ifr,
5725 sizeof(struct ifreq)))
5726 ret = -EFAULT;
5727 }
5728 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005729
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005730 /*
5731 * These ioctl calls:
5732 * - require superuser power.
5733 * - require strict serialization.
5734 * - return a value
5735 */
5736 case SIOCGMIIPHY:
5737 case SIOCGMIIREG:
5738 case SIOCSIFNAME:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005739 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005740 return -EPERM;
5741 dev_load(net, ifr.ifr_name);
5742 rtnl_lock();
5743 ret = dev_ifsioc(net, &ifr, cmd);
5744 rtnl_unlock();
5745 if (!ret) {
5746 if (colon)
5747 *colon = ':';
5748 if (copy_to_user(arg, &ifr,
5749 sizeof(struct ifreq)))
5750 ret = -EFAULT;
5751 }
5752 return ret;
5753
5754 /*
5755 * These ioctl calls:
5756 * - require superuser power.
5757 * - require strict serialization.
5758 * - do not return a value
5759 */
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005760 case SIOCSIFMAP:
5761 case SIOCSIFTXQLEN:
5762 if (!capable(CAP_NET_ADMIN))
5763 return -EPERM;
5764 /* fall through */
5765 /*
5766 * These ioctl calls:
5767 * - require local superuser power.
5768 * - require strict serialization.
5769 * - do not return a value
5770 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005771 case SIOCSIFFLAGS:
5772 case SIOCSIFMETRIC:
5773 case SIOCSIFMTU:
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005774 case SIOCSIFHWADDR:
5775 case SIOCSIFSLAVE:
5776 case SIOCADDMULTI:
5777 case SIOCDELMULTI:
5778 case SIOCSIFHWBROADCAST:
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005779 case SIOCSMIIREG:
5780 case SIOCBONDENSLAVE:
5781 case SIOCBONDRELEASE:
5782 case SIOCBONDSETHWADDR:
5783 case SIOCBONDCHANGEACTIVE:
5784 case SIOCBRADDIF:
5785 case SIOCBRDELIF:
5786 case SIOCSHWTSTAMP:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005787 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005788 return -EPERM;
5789 /* fall through */
5790 case SIOCBONDSLAVEINFOQUERY:
5791 case SIOCBONDINFOQUERY:
5792 dev_load(net, ifr.ifr_name);
5793 rtnl_lock();
5794 ret = dev_ifsioc(net, &ifr, cmd);
5795 rtnl_unlock();
5796 return ret;
5797
5798 case SIOCGIFMEM:
5799 /* Get the per device memory space. We can add this but
5800 * currently do not support it */
5801 case SIOCSIFMEM:
5802 /* Set the per device memory buffer space.
5803 * Not applicable in our case */
5804 case SIOCSIFLINK:
Lifeng Sun41c31f32011-04-27 22:04:51 +00005805 return -ENOTTY;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005806
5807 /*
5808 * Unknown or private ioctl.
5809 */
5810 default:
5811 if (cmd == SIOCWANDEV ||
5812 (cmd >= SIOCDEVPRIVATE &&
5813 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005814 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005815 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005816 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005817 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005818 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005819 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005820 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005821 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005822 }
5823 /* Take care of Wireless Extensions */
5824 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5825 return wext_handle_ioctl(net, &ifr, cmd, arg);
Lifeng Sun41c31f32011-04-27 22:04:51 +00005826 return -ENOTTY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005827 }
5828}
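From user space these requests arrive through the socket ioctl path. A minimal user-space sketch exercising the SIOCGIFFLAGS branch above; the interface name "eth0" is an assumption:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any socket will do */

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
		printf("eth0 flags: 0x%x\n", (unsigned int)ifr.ifr_flags);
	close(fd);
	return 0;
}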
5829
5830
5831/**
5832 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005833 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005834 *
5835 * Returns a suitable unique value for a new device interface
5836 * number. The caller must hold the rtnl semaphore or the
5837 * dev_base_lock to be sure it remains unique.
5838 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005839static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005840{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005841 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005842 for (;;) {
5843 if (++ifindex <= 0)
5844 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005845 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005846 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005847 }
5848}
5849
Linus Torvalds1da177e2005-04-16 15:20:36 -07005850/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005851static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005852
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005853static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005854{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005855 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005856}
5857
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005858static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005859{
Krishna Kumare93737b2009-12-08 22:26:02 +00005860 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005861
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005862 BUG_ON(dev_boot_phase);
5863 ASSERT_RTNL();
5864
Krishna Kumare93737b2009-12-08 22:26:02 +00005865 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005866 /* Some devices call this without ever having registered,
Krishna Kumare93737b2009-12-08 22:26:02 +00005867 * to unwind a failed initialization. Remove those
5868 * devices and proceed with the remaining ones.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005869 */
5870 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005871 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5872 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005873
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005874 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00005875 list_del(&dev->unreg_list);
5876 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005877 }
Eric Dumazet449f4542011-05-19 12:24:16 +00005878 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005879 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00005880 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005881
Octavian Purdila44345722010-12-13 12:44:07 +00005882 /* If device is running, close it first. */
5883 dev_close_many(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005884
Octavian Purdila44345722010-12-13 12:44:07 +00005885 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005886 /* And unlink it from device chain. */
5887 unlist_netdevice(dev);
5888
5889 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005890 }
5891
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005892 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005893
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005894 list_for_each_entry(dev, head, unreg_list) {
5895 /* Shutdown queueing discipline. */
5896 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005897
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005898
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005899 /* Notify protocols, that we are about to destroy
5900 this device. They should clean all the things.
5901 */
5902 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5903
Patrick McHardya2835762010-02-26 06:34:51 +00005904 if (!dev->rtnl_link_ops ||
5905 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5906 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5907
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005908 /*
5909 * Flush the unicast and multicast chains
5910 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005911 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005912 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005913
5914 if (dev->netdev_ops->ndo_uninit)
5915 dev->netdev_ops->ndo_uninit(dev);
5916
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005917 /* Notifier chain MUST detach us all upper devices. */
5918 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005919
5920 /* Remove entries from kobject tree */
5921 netdev_unregister_kobject(dev);
5922 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005923
Eric W. Biederman850a5452011-10-13 22:25:23 +00005924 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005925
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005926 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005927 dev_put(dev);
5928}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005929
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005930static void rollback_registered(struct net_device *dev)
5931{
5932 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005933
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005934 list_add(&dev->unreg_list, &single);
5935 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00005936 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005937}
5938
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005939static netdev_features_t netdev_fix_features(struct net_device *dev,
5940 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07005941{
Michał Mirosław57422dc2011-01-22 12:14:12 +00005942 /* Fix illegal checksum combinations */
5943 if ((features & NETIF_F_HW_CSUM) &&
5944 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005945 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00005946 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5947 }
5948
Herbert Xub63365a2008-10-23 01:11:29 -07005949 /* Fix illegal SG+CSUM combinations. */
5950 if ((features & NETIF_F_SG) &&
5951 !(features & NETIF_F_ALL_CSUM)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005952 netdev_dbg(dev,
5953 "Dropping NETIF_F_SG since no checksum feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005954 features &= ~NETIF_F_SG;
5955 }
5956
5957 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005958 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005959 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005960 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07005961 }
5962
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005963 /* TSO ECN requires that TSO is present as well. */
5964 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5965 features &= ~NETIF_F_TSO_ECN;
5966
Michał Mirosław212b5732011-02-15 16:59:16 +00005967 /* Software GSO depends on SG. */
5968 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005969 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005970 features &= ~NETIF_F_GSO;
5971 }
5972
Michał Mirosławacd11302011-01-24 15:45:15 -08005973 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005974 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005975 /* maybe split UFO into V4 and V6? */
5976 if (!((features & NETIF_F_GEN_CSUM) ||
5977 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5978 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005979 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005980 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005981 features &= ~NETIF_F_UFO;
5982 }
5983
5984 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005985 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005986 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005987 features &= ~NETIF_F_UFO;
5988 }
5989 }
5990
5991 return features;
5992}
Herbert Xub63365a2008-10-23 01:11:29 -07005993
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005994int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00005995{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005996 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00005997 int err = 0;
5998
Michał Mirosław87267482011-04-12 09:56:38 +00005999 ASSERT_RTNL();
6000
Michał Mirosław5455c692011-02-15 16:59:17 +00006001 features = netdev_get_wanted_features(dev);
6002
6003 if (dev->netdev_ops->ndo_fix_features)
6004 features = dev->netdev_ops->ndo_fix_features(dev, features);
6005
6006 /* driver might be less strict about feature dependencies */
6007 features = netdev_fix_features(dev, features);
6008
6009 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006010 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00006011
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006012 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6013 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00006014
6015 if (dev->netdev_ops->ndo_set_features)
6016 err = dev->netdev_ops->ndo_set_features(dev, features);
6017
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006018 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00006019 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006020 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6021 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006022 return -1;
6023 }
6024
6025 if (!err)
6026 dev->features = features;
6027
6028 return 1;
6029}
6030
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006031/**
6032 * netdev_update_features - recalculate device features
6033 * @dev: the device to check
6034 *
6035 * Recalculate dev->features set and send notifications if it
6036 * has changed. Should be called whenever driver- or hardware-dependent
6037 * conditions that influence the features may have changed.
6038 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006039void netdev_update_features(struct net_device *dev)
6040{
6041 if (__netdev_update_features(dev))
6042 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00006043}
6044EXPORT_SYMBOL(netdev_update_features);
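A driver calls this after anything that changes what its ndo_fix_features would report. A hedged sketch of that round trip; the mydrv_* names, the mydrv_priv layout and the fw_tso_ok condition are hypothetical:

struct mydrv_priv {
	bool fw_tso_ok;		/* hypothetical runtime condition */
};

/* ndo_fix_features: mask out TSO while the firmware cannot handle it. */
static netdev_features_t mydrv_fix_features(struct net_device *dev,
					    netdev_features_t features)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	if (!priv->fw_tso_ok)
		features &= ~NETIF_F_ALL_TSO;
	return features;
}

/* After the condition changes, re-evaluate under RTNL. */
static void mydrv_fw_state_changed(struct net_device *dev, bool tso_ok)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	priv->fw_tso_ok = tso_ok;
	rtnl_lock();
	netdev_update_features(dev);	/* recompute and notify if changed */
	rtnl_unlock();
}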
6045
Linus Torvalds1da177e2005-04-16 15:20:36 -07006046/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006047 * netdev_change_features - recalculate device features
6048 * @dev: the device to check
6049 *
6050 * Recalculate dev->features set and send notifications even
6051 * if they have not changed. Should be called instead of
6052 * netdev_update_features() if also dev->vlan_features might
6053 * have changed to allow the changes to be propagated to stacked
6054 * VLAN devices.
6055 */
6056void netdev_change_features(struct net_device *dev)
6057{
6058 __netdev_update_features(dev);
6059 netdev_features_change(dev);
6060}
6061EXPORT_SYMBOL(netdev_change_features);
6062
6063/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006064 * netif_stacked_transfer_operstate - transfer operstate
6065 * @rootdev: the root or lower level device to transfer state from
6066 * @dev: the device to transfer operstate to
6067 *
6068 * Transfer operational state from root to device. This is normally
6069 * called when a stacking relationship exists between the root
6070 * device and the device (a leaf device).
6071 */
6072void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6073 struct net_device *dev)
6074{
6075 if (rootdev->operstate == IF_OPER_DORMANT)
6076 netif_dormant_on(dev);
6077 else
6078 netif_dormant_off(dev);
6079
6080 if (netif_carrier_ok(rootdev)) {
6081 if (!netif_carrier_ok(dev))
6082 netif_carrier_on(dev);
6083 } else {
6084 if (netif_carrier_ok(dev))
6085 netif_carrier_off(dev);
6086 }
6087}
6088EXPORT_SYMBOL(netif_stacked_transfer_operstate);
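This is typically driven from a netdevice notifier on the lower device. A hedged sketch for this kernel's notifier convention, where ptr is the net_device itself; myvdev_lookup, mapping a lower device to its upper, is hypothetical:

static struct net_device *myvdev_lookup(struct net_device *lower);

static int myvdev_notify(struct notifier_block *nb,
			 unsigned long event, void *ptr)
{
	struct net_device *lower = ptr;
	struct net_device *vdev = myvdev_lookup(lower);

	if (vdev && event == NETDEV_CHANGE)
		netif_stacked_transfer_operstate(lower, vdev);
	return NOTIFY_DONE;
}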
6089
Tom Herbertbf264142010-11-26 08:36:09 +00006090#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006091static int netif_alloc_rx_queues(struct net_device *dev)
6092{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006093 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00006094 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006095
Tom Herbertbd25fa72010-10-18 18:00:16 +00006096 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006097
Tom Herbertbd25fa72010-10-18 18:00:16 +00006098 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
6099 if (!rx) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006100 pr_err("netdev: Unable to allocate %u rx queues\n", count);
Tom Herbertbd25fa72010-10-18 18:00:16 +00006101 return -ENOMEM;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006102 }
Tom Herbertbd25fa72010-10-18 18:00:16 +00006103 dev->_rx = rx;
6104
Tom Herbertbd25fa72010-10-18 18:00:16 +00006105 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00006106 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006107 return 0;
6108}
Tom Herbertbf264142010-11-26 08:36:09 +00006109#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006110
Changli Gaoaa942102010-12-04 02:31:41 +00006111static void netdev_init_one_queue(struct net_device *dev,
6112 struct netdev_queue *queue, void *_unused)
6113{
6114 /* Initialize queue lock */
6115 spin_lock_init(&queue->_xmit_lock);
6116 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6117 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00006118 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00006119 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00006120#ifdef CONFIG_BQL
6121 dql_init(&queue->dql, HZ);
6122#endif
Changli Gaoaa942102010-12-04 02:31:41 +00006123}
6124
Tom Herberte6484932010-10-18 18:04:39 +00006125static int netif_alloc_netdev_queues(struct net_device *dev)
6126{
6127 unsigned int count = dev->num_tx_queues;
6128 struct netdev_queue *tx;
6129
6130 BUG_ON(count < 1);
6131
6132 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
6133 if (!tx) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006134 pr_err("netdev: Unable to allocate %u tx queues\n", count);
Tom Herberte6484932010-10-18 18:04:39 +00006135 return -ENOMEM;
6136 }
6137 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00006138
Tom Herberte6484932010-10-18 18:04:39 +00006139 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6140 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00006141
6142 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00006143}
6144
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006145/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006146 * register_netdevice - register a network device
6147 * @dev: device to register
6148 *
6149 * Take a completed network device structure and add it to the kernel
6150 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6151 * chain. 0 is returned on success. A negative errno code is returned
6152 * on a failure to set up the device, or if the name is a duplicate.
6153 *
6154 * Callers must hold the rtnl semaphore. You may want
6155 * register_netdev() instead of this.
6156 *
6157 * BUGS:
6158 * The locking appears insufficient to guarantee two parallel registers
6159 * will not get the same name.
6160 */
6161
6162int register_netdevice(struct net_device *dev)
6163{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006164 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006165 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006166
6167 BUG_ON(dev_boot_phase);
6168 ASSERT_RTNL();
6169
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006170 might_sleep();
6171
Linus Torvalds1da177e2005-04-16 15:20:36 -07006172 /* When net_device's are persistent, this will be fatal. */
6173 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006174 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006175
David S. Millerf1f28aa2008-07-15 00:08:33 -07006176 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07006177 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006178
Linus Torvalds1da177e2005-04-16 15:20:36 -07006179 dev->iflink = -1;
6180
Gao feng828de4f2012-09-13 20:58:27 +00006181 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00006182 if (ret < 0)
6183 goto out;
6184
Linus Torvalds1da177e2005-04-16 15:20:36 -07006185 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006186 if (dev->netdev_ops->ndo_init) {
6187 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006188 if (ret) {
6189 if (ret > 0)
6190 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08006191 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006192 }
6193 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006194
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00006195 ret = -EBUSY;
6196 if (!dev->ifindex)
6197 dev->ifindex = dev_new_index(net);
6198 else if (__dev_get_by_index(net, dev->ifindex))
6199 goto err_uninit;
6200
Linus Torvalds1da177e2005-04-16 15:20:36 -07006201 if (dev->iflink == -1)
6202 dev->iflink = dev->ifindex;
6203
Michał Mirosław5455c692011-02-15 16:59:17 +00006204 /* Transfer changeable features to wanted_features and enable
6205 * software offloads (GSO and GRO).
6206 */
6207 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00006208 dev->features |= NETIF_F_SOFT_FEATURES;
6209 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006210
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006211 /* Turn on no cache copy if HW is doing checksum */
Michał Mirosław34324dc2011-11-15 15:29:55 +00006212 if (!(dev->flags & IFF_LOOPBACK)) {
6213 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6214 if (dev->features & NETIF_F_ALL_CSUM) {
6215 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
6216 dev->features |= NETIF_F_NOCACHE_COPY;
6217 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006218 }
6219
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006220 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00006221 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006222 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00006223
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00006224 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6225 ret = notifier_to_errno(ret);
6226 if (ret)
6227 goto err_uninit;
6228
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006229 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006230 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006231 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006232 dev->reg_state = NETREG_REGISTERED;
6233
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006234 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00006235
Linus Torvalds1da177e2005-04-16 15:20:36 -07006236 /*
6237 * Default initial state at registry is that the
6238 * device is present.
6239 */
6240
6241 set_bit(__LINK_STATE_PRESENT, &dev->state);
6242
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01006243 linkwatch_init_dev(dev);
6244
Linus Torvalds1da177e2005-04-16 15:20:36 -07006245 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006246 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006247 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006248 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006249
Jiri Pirko948b3372013-01-08 01:38:25 +00006250 /* If the device has a permanent device address, the driver should
6251 * set dev_addr, and addr_assign_type should be set to
6252 * NET_ADDR_PERM (default value).
6253 */
6254 if (dev->addr_assign_type == NET_ADDR_PERM)
6255 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6256
Linus Torvalds1da177e2005-04-16 15:20:36 -07006257 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006258 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07006259 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006260 if (ret) {
6261 rollback_registered(dev);
6262 dev->reg_state = NETREG_UNREGISTERED;
6263 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006264 /*
6265 * Prevent userspace races by waiting until the network
6266 * device is fully setup before sending notifications.
6267 */
Patrick McHardya2835762010-02-26 06:34:51 +00006268 if (!dev->rtnl_link_ops ||
6269 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6270 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006271
6272out:
6273 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006274
6275err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006276 if (dev->netdev_ops->ndo_uninit)
6277 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006278 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006279}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006280EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006281
6282/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006283 * init_dummy_netdev - init a dummy network device for NAPI
6284 * @dev: device to init
6285 *
6286 * This takes a network device structure and initialize the minimum
6287 * amount of fields so it can be used to schedule NAPI polls without
6288 * registering a full blown interface. This is to be used by drivers
6289 * that need to tie several hardware interfaces to a single NAPI
6290 * poll scheduler due to HW limitations.
6291 */
6292int init_dummy_netdev(struct net_device *dev)
6293{
6294 /* Clear everything. Note we don't initialize spinlocks
6295 * as they aren't supposed to be taken by any of the
6296 * NAPI code and this dummy netdev is supposed to be
6297 * only ever used for NAPI polls.
6298 */
6299 memset(dev, 0, sizeof(struct net_device));
6300
6301 /* make sure we BUG if trying to hit standard
6302 * register/unregister code path
6303 */
6304 dev->reg_state = NETREG_DUMMY;
6305
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006306 /* NAPI wants this */
6307 INIT_LIST_HEAD(&dev->napi_list);
6308
6309 /* a dummy interface is started by default */
6310 set_bit(__LINK_STATE_PRESENT, &dev->state);
6311 set_bit(__LINK_STATE_START, &dev->state);
6312
Eric Dumazet29b44332010-10-11 10:22:12 +00006313 /* Note : We don't allocate pcpu_refcnt for dummy devices,
6314 * because users of this 'device' don't need to change
6315 * its refcount.
6316 */
6317
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006318 return 0;
6319}
6320EXPORT_SYMBOL_GPL(init_dummy_netdev);
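Typical consumers embed the dummy netdev in their hardware state so several interfaces can share one poller. A hedged sketch; the myhw layout and myhw_poll are hypothetical, and 64 is the conventional NAPI weight:

static int myhw_poll(struct napi_struct *napi, int budget);

struct myhw {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static void myhw_init_napi(struct myhw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, myhw_poll, 64);
}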
6321
6322
6323/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006324 * register_netdev - register a network device
6325 * @dev: device to register
6326 *
6327 * Take a completed network device structure and add it to the kernel
6328 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6329 * chain. 0 is returned on success. A negative errno code is returned
6330 * on a failure to set up the device, or if the name is a duplicate.
6331 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07006332 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07006333 * and expands the device name if you passed a format string to
6334 * alloc_netdev.
6335 */
6336int register_netdev(struct net_device *dev)
6337{
6338 int err;
6339
6340 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006341 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006342 rtnl_unlock();
6343 return err;
6344}
6345EXPORT_SYMBOL(register_netdev);
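The canonical driver-side pattern around this wrapper is allocate, register, free on failure. A minimal sketch; mydrv_create is a hypothetical name:

#include <linux/etherdevice.h>

static struct net_device *mydrv_create(void)
{
	struct net_device *dev = alloc_etherdev(0);	/* name "eth%d" */
	int err;

	if (!dev)
		return NULL;
	err = register_netdev(dev);	/* takes RTNL, expands "%d" */
	if (err) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}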
6346
Eric Dumazet29b44332010-10-11 10:22:12 +00006347int netdev_refcnt_read(const struct net_device *dev)
6348{
6349 int i, refcnt = 0;
6350
6351 for_each_possible_cpu(i)
6352 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6353 return refcnt;
6354}
6355EXPORT_SYMBOL(netdev_refcnt_read);
6356
Ben Hutchings2c530402012-07-10 10:55:09 +00006357/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006358 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00006359 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006360 *
6361 * This is called when unregistering network devices.
6362 *
6363 * Any protocol or device that holds a reference should register
6364 * for netdevice notification, and cleanup and put back the
6365 * reference if they receive an UNREGISTER event.
6366 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006367 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006368 */
6369static void netdev_wait_allrefs(struct net_device *dev)
6370{
6371 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00006372 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006373
Eric Dumazete014deb2009-11-17 05:59:21 +00006374 linkwatch_forget_dev(dev);
6375
Linus Torvalds1da177e2005-04-16 15:20:36 -07006376 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00006377 refcnt = netdev_refcnt_read(dev);
6378
6379 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006380 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006381 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006382
6383 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006384 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006385
Eric Dumazet748e2d92012-08-22 21:50:59 +00006386 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006387 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00006388 rtnl_lock();
6389
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006390 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006391 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6392 &dev->state)) {
6393 /* We must not have linkwatch events
6394 * pending on unregister. If this
6395 * happens, we simply run the queue
6396 * unscheduled, resulting in a noop
6397 * for this device.
6398 */
6399 linkwatch_run_queue();
6400 }
6401
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006402 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006403
6404 rebroadcast_time = jiffies;
6405 }
6406
6407 msleep(250);
6408
Eric Dumazet29b44332010-10-11 10:22:12 +00006409 refcnt = netdev_refcnt_read(dev);
6410
Linus Torvalds1da177e2005-04-16 15:20:36 -07006411 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006412 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6413 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006414 warning_time = jiffies;
6415 }
6416 }
6417}
6418
6419/* The sequence is:
6420 *
6421 * rtnl_lock();
6422 * ...
6423 * register_netdevice(x1);
6424 * register_netdevice(x2);
6425 * ...
6426 * unregister_netdevice(y1);
6427 * unregister_netdevice(y2);
6428 * ...
6429 * rtnl_unlock();
6430 * free_netdev(y1);
6431 * free_netdev(y2);
6432 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006433 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006434 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006435 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006436 * without deadlocking with linkwatch via keventd.
6437 * 2) Since we run with the RTNL semaphore not held, we can sleep
6438 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006439 *
6440 * We must not return until all unregister events added during
6441 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006442 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006443void netdev_run_todo(void)
6444{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006445 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006446
Linus Torvalds1da177e2005-04-16 15:20:36 -07006447 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006448 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07006449
6450 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006451
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006452
6453 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00006454 if (!list_empty(&list))
6455 rcu_barrier();
6456
Linus Torvalds1da177e2005-04-16 15:20:36 -07006457 while (!list_empty(&list)) {
6458 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00006459 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006460 list_del(&dev->todo_list);
6461
Eric Dumazet748e2d92012-08-22 21:50:59 +00006462 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006463 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00006464 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006465
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006466 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006467 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006468 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006469 dump_stack();
6470 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006471 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006472
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006473 dev->reg_state = NETREG_UNREGISTERED;
6474
Changli Gao152102c2010-03-30 20:16:22 +00006475 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07006476
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006477 netdev_wait_allrefs(dev);
6478
6479 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00006480 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00006481 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6482 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07006483 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006484
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006485 if (dev->destructor)
6486 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006487
6488 /* Free network device */
6489 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006490 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006491}
6492
Ben Hutchings3cfde792010-07-09 09:11:52 +00006493/* Convert net_device_stats to rtnl_link_stats64. They have the same
6494 * fields in the same order, with only the type differing.
6495 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006496void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6497 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00006498{
6499#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006500 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6501 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00006502#else
6503 size_t i, n = sizeof(*stats64) / sizeof(u64);
6504 const unsigned long *src = (const unsigned long *)netdev_stats;
6505 u64 *dst = (u64 *)stats64;
6506
6507 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6508 sizeof(*stats64) / sizeof(u64));
6509 for (i = 0; i < n; i++)
6510 dst[i] = src[i];
6511#endif
6512}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006513EXPORT_SYMBOL(netdev_stats_to_stats64);
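
/* Usage sketch (illustrative; the "mydrv" names are hypothetical, not
 * part of this file): a driver that still accounts into a legacy
 * struct net_device_stats can expose 64-bit statistics by converting
 * inside its ndo_get_stats64 hook.
 *
 *	static struct rtnl_link_stats64 *
 *	mydrv_get_stats64(struct net_device *dev,
 *			  struct rtnl_link_stats64 *storage)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		netdev_stats_to_stats64(storage, &priv->legacy_stats);
 *		return storage;
 *	}
 */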
Ben Hutchings3cfde792010-07-09 09:11:52 +00006514
Eric Dumazetd83345a2009-11-16 03:36:51 +00006515/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006516 * dev_get_stats - get network device statistics
6517 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07006518 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006519 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00006520 * Get network statistics from device. Return @storage.
6521 * The device driver may provide its own method by setting
6522 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6523 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006524 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00006525struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6526 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00006527{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006528 const struct net_device_ops *ops = dev->netdev_ops;
6529
Eric Dumazet28172732010-07-07 14:58:56 -07006530 if (ops->ndo_get_stats64) {
6531 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006532 ops->ndo_get_stats64(dev, storage);
6533 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00006534 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006535 } else {
6536 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07006537 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006538 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07006539 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07006540}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006541EXPORT_SYMBOL(dev_get_stats);
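
/* Caller sketch (illustrative): readers pass a stack buffer and use the
 * returned pointer; "seq" is a hypothetical seq_file, not part of this
 * function's contract.
 *
 *	struct rtnl_link_stats64 temp;
 *	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 *
 *	seq_printf(seq, "%s: rx %llu tx %llu\n", dev->name,
 *		   stats->rx_packets, stats->tx_packets);
 */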
Rusty Russellc45d2862007-03-28 14:29:08 -07006542
Eric Dumazet24824a02010-10-02 06:11:55 +00006543struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07006544{
Eric Dumazet24824a02010-10-02 06:11:55 +00006545 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07006546
Eric Dumazet24824a02010-10-02 06:11:55 +00006547#ifdef CONFIG_NET_CLS_ACT
6548 if (queue)
6549 return queue;
6550 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6551 if (!queue)
6552 return NULL;
6553 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00006554 queue->qdisc = &noop_qdisc;
6555 queue->qdisc_sleeping = &noop_qdisc;
6556 rcu_assign_pointer(dev->ingress_queue, queue);
6557#endif
6558 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07006559}
6560
Eric Dumazet2c60db02012-09-16 09:17:26 +00006561static const struct ethtool_ops default_ethtool_ops;
6562
Linus Torvalds1da177e2005-04-16 15:20:36 -07006563/**
Tom Herbert36909ea2011-01-09 19:36:31 +00006564 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006565 * @sizeof_priv: size of private data to allocate space for
6566 * @name: device name format string
6567 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00006568 * @txqs: the number of TX subqueues to allocate
6569 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07006570 *
6571 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07006572 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00006573 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006574 */
Tom Herbert36909ea2011-01-09 19:36:31 +00006575struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6576 void (*setup)(struct net_device *),
6577 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006578{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006579 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07006580 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006581 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006582
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07006583 BUG_ON(strlen(name) >= sizeof(dev->name));
6584
Tom Herbert36909ea2011-01-09 19:36:31 +00006585 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006586 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00006587 return NULL;
6588 }
6589
Tom Herbert36909ea2011-01-09 19:36:31 +00006590#ifdef CONFIG_RPS
6591 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006592 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00006593 return NULL;
6594 }
6595#endif
6596
David S. Millerfd2ea0a2008-07-17 01:56:23 -07006597 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006598 if (sizeof_priv) {
6599 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006600 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006601 alloc_size += sizeof_priv;
6602 }
6603 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006604 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006605
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07006606 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006607 if (!p) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006608 pr_err("alloc_netdev: Unable to allocate device\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006609 return NULL;
6610 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006611
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006612 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006613 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006614
Eric Dumazet29b44332010-10-11 10:22:12 +00006615 dev->pcpu_refcnt = alloc_percpu(int);
6616 if (!dev->pcpu_refcnt)
Tom Herberte6484932010-10-18 18:04:39 +00006617 goto free_p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006618
Linus Torvalds1da177e2005-04-16 15:20:36 -07006619 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00006620 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006621
Jiri Pirko22bedad32010-04-01 21:22:57 +00006622 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006623 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00006624
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006625 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006626
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07006627 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00006628 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006629
Herbert Xud565b0a2008-12-15 23:38:52 -08006630 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006631 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00006632 INIT_LIST_HEAD(&dev->link_watch_list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006633 INIT_LIST_HEAD(&dev->upper_dev_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07006634 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006635 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006636
6637 dev->num_tx_queues = txqs;
6638 dev->real_num_tx_queues = txqs;
6639 if (netif_alloc_netdev_queues(dev))
6640 goto free_all;
6641
6642#ifdef CONFIG_RPS
6643 dev->num_rx_queues = rxqs;
6644 dev->real_num_rx_queues = rxqs;
6645 if (netif_alloc_rx_queues(dev))
6646 goto free_all;
6647#endif
6648
Linus Torvalds1da177e2005-04-16 15:20:36 -07006649 strcpy(dev->name, name);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006650 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00006651 if (!dev->ethtool_ops)
6652 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006653 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006654
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006655free_all:
6656 free_netdev(dev);
6657 return NULL;
6658
Eric Dumazet29b44332010-10-11 10:22:12 +00006659free_pcpu:
6660 free_percpu(dev->pcpu_refcnt);
Tom Herberted9af2e2010-11-09 10:47:30 +00006661 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00006662#ifdef CONFIG_RPS
6663 kfree(dev->_rx);
6664#endif
6665
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006666free_p:
6667 kfree(p);
6668 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006669}
Tom Herbert36909ea2011-01-09 19:36:31 +00006670EXPORT_SYMBOL(alloc_netdev_mqs);
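
/* Usage sketch (illustrative; "struct my_priv" and the queue counts are
 * hypothetical): allocate an Ethernet-style device with four TX and
 * four RX queues; registration happens separately under the rtnl lock.
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "eth%d",
 *			       ether_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;
 */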
Linus Torvalds1da177e2005-04-16 15:20:36 -07006671
6672/**
6673 * free_netdev - free network device
6674 * @dev: device
6675 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006676 * This function does the last stage of destroying an allocated device
6677 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006678 * If this is the last reference then it will be freed.
6679 */
6680void free_netdev(struct net_device *dev)
6681{
Herbert Xud565b0a2008-12-15 23:38:52 -08006682 struct napi_struct *p, *n;
6683
Denis V. Lunevf3005d72008-04-16 02:02:18 -07006684 release_net(dev_net(dev));
6685
David S. Millere8a04642008-07-17 00:34:19 -07006686 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00006687#ifdef CONFIG_RPS
6688 kfree(dev->_rx);
6689#endif
David S. Millere8a04642008-07-17 00:34:19 -07006690
Eric Dumazet33d480c2011-08-11 19:30:52 +00006691 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00006692
Jiri Pirkof001fde2009-05-05 02:48:28 +00006693 /* Flush device addresses */
6694 dev_addr_flush(dev);
6695
Herbert Xud565b0a2008-12-15 23:38:52 -08006696 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6697 netif_napi_del(p);
6698
Eric Dumazet29b44332010-10-11 10:22:12 +00006699 free_percpu(dev->pcpu_refcnt);
6700 dev->pcpu_refcnt = NULL;
6701
Stephen Hemminger3041a062006-05-26 13:25:24 -07006702 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006703 if (dev->reg_state == NETREG_UNINITIALIZED) {
6704 kfree((char *)dev - dev->padded);
6705 return;
6706 }
6707
6708 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6709 dev->reg_state = NETREG_RELEASED;
6710
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07006711 /* will free via device release */
6712 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006713}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006714EXPORT_SYMBOL(free_netdev);
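
/* Error-path sketch (illustrative): before a successful register the
 * caller still owns the device and releases it directly; after a
 * successful register it must unregister first.
 *
 *	err = register_netdevice(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */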
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006715
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006716/**
6717 * synchronize_net - Synchronize with packet receive processing
6718 *
6719 * Wait for packets currently being received to be done.
6720 * Does not block later packets from starting.
6721 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006722void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006723{
6724 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00006725 if (rtnl_is_locked())
6726 synchronize_rcu_expedited();
6727 else
6728 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006729}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006730EXPORT_SYMBOL(synchronize_net);
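
/* Usage sketch (illustrative; "myhook" is a hypothetical RCU-protected
 * pointer): unpublish the pointer, wait out in-flight receive paths,
 * then free the old object.
 *
 *	old = rtnl_dereference(dev->myhook);
 *	RCU_INIT_POINTER(dev->myhook, NULL);
 *	synchronize_net();
 *	kfree(old);
 */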
Linus Torvalds1da177e2005-04-16 15:20:36 -07006731
6732/**
Eric Dumazet44a08732009-10-27 07:03:04 +00006733 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07006734 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00006735 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08006736 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07006737 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006738 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00006739 * If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006740 *
6741 * Callers must hold the rtnl semaphore. You may want
6742 * unregister_netdev() instead of this.
6743 */
6744
Eric Dumazet44a08732009-10-27 07:03:04 +00006745void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006746{
Herbert Xua6620712007-12-12 19:21:56 -08006747 ASSERT_RTNL();
6748
Eric Dumazet44a08732009-10-27 07:03:04 +00006749 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006750 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00006751 } else {
6752 rollback_registered(dev);
6753 /* Finish processing unregister after unlock */
6754 net_set_todo(dev);
6755 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006756}
Eric Dumazet44a08732009-10-27 07:03:04 +00006757EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006758
6759/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006760 * unregister_netdevice_many - unregister many devices
6761 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006762 */
6763void unregister_netdevice_many(struct list_head *head)
6764{
6765 struct net_device *dev;
6766
6767 if (!list_empty(head)) {
6768 rollback_registered_many(head);
6769 list_for_each_entry(dev, head, unreg_list)
6770 net_set_todo(dev);
6771 }
6772}
Eric Dumazet63c80992009-10-27 07:06:49 +00006773EXPORT_SYMBOL(unregister_netdevice_many);
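
/* Batching sketch (illustrative; mirrors default_device_exit_batch()
 * below): queue each device, then tear them all down in one pass to
 * amortize the notifier and RCU round trips.
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	for_each_netdev(net, dev)
 *		unregister_netdevice_queue(dev, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */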
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006774
6775/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006776 * unregister_netdev - remove device from the kernel
6777 * @dev: device
6778 *
6779 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006780 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006781 *
6782 * This is just a wrapper for unregister_netdevice that takes
6783 * the rtnl semaphore. In general you want to use this and not
6784 * unregister_netdevice.
6785 */
6786void unregister_netdev(struct net_device *dev)
6787{
6788 rtnl_lock();
6789 unregister_netdevice(dev);
6790 rtnl_unlock();
6791}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006792EXPORT_SYMBOL(unregister_netdev);
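
/* Usage sketch (illustrative; "mydev" is a hypothetical module-global):
 * a driver's exit path unregisters the interface and then frees it.
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		unregister_netdev(mydev);
 *		free_netdev(mydev);
 *	}
 */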
6793
Eric W. Biedermance286d32007-09-12 13:53:49 +02006794/**
 6795 * dev_change_net_namespace - move device to a different network namespace
6796 * @dev: device
6797 * @net: network namespace
6798 * @pat: If not NULL name pattern to try if the current device name
6799 * is already taken in the destination network namespace.
6800 *
6801 * This function shuts down a device interface and moves it
6802 * to a new network namespace. On success 0 is returned, on
 6803 * a failure a negative errno code is returned.
6804 *
6805 * Callers must hold the rtnl semaphore.
6806 */
6807
6808int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6809{
Eric W. Biedermance286d32007-09-12 13:53:49 +02006810 int err;
6811
6812 ASSERT_RTNL();
6813
6814 /* Don't allow namespace local devices to be moved. */
6815 err = -EINVAL;
6816 if (dev->features & NETIF_F_NETNS_LOCAL)
6817 goto out;
6818
 6819 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02006820 if (dev->reg_state != NETREG_REGISTERED)
6821 goto out;
6822
 6823 /* Get out if there is nothing to do */
6824 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09006825 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02006826 goto out;
6827
6828 /* Pick the destination device name, and ensure
6829 * we can use it in the destination network namespace.
6830 */
6831 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00006832 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006833 /* We get here if we can't use the current device name */
6834 if (!pat)
6835 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00006836 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006837 goto out;
6838 }
6839
6840 /*
 6841 * And now a mini version of register_netdevice/unregister_netdevice.
6842 */
6843
6844 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07006845 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006846
6847 /* And unlink it from device chain */
6848 err = -ENODEV;
6849 unlist_netdevice(dev);
6850
6851 synchronize_net();
6852
6853 /* Shutdown queueing discipline. */
6854 dev_shutdown(dev);
6855
 6856 /* Notify protocols that we are about to destroy
6857 this device. They should clean all the things.
David Lamparter3b27e102010-09-17 03:22:19 +00006858
6859 Note that dev->reg_state stays at NETREG_REGISTERED.
6860 This is wanted because this way 8021q and macvlan know
6861 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02006862 */
6863 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00006864 rcu_barrier();
6865 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric W. Biedermand2237d32011-10-21 06:24:20 +00006866 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006867
6868 /*
6869 * Flush the unicast and multicast chains
6870 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006871 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006872 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006873
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006874 /* Send a netdev-removed uevent to the old namespace */
6875 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6876
Eric W. Biedermance286d32007-09-12 13:53:49 +02006877 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006878 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006879
Eric W. Biedermance286d32007-09-12 13:53:49 +02006880 /* If there is an ifindex conflict assign a new one */
6881 if (__dev_get_by_index(net, dev->ifindex)) {
6882 int iflink = (dev->iflink == dev->ifindex);
6883 dev->ifindex = dev_new_index(net);
6884 if (iflink)
6885 dev->iflink = dev->ifindex;
6886 }
6887
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006888 /* Send a netdev-add uevent to the new namespace */
6889 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6890
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006891 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07006892 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006893 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006894
6895 /* Add the device back in the hashes */
6896 list_netdevice(dev);
6897
 6898 /* Notify protocols that a new device appeared. */
6899 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6900
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006901 /*
6902 * Prevent userspace races by waiting until the network
6903 * device is fully setup before sending notifications.
6904 */
6905 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6906
Eric W. Biedermance286d32007-09-12 13:53:49 +02006907 synchronize_net();
6908 err = 0;
6909out:
6910 return err;
6911}
Johannes Berg463d0182009-07-14 00:33:35 +02006912EXPORT_SYMBOL_GPL(dev_change_net_namespace);
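
/* Caller sketch (illustrative; "newnet" is a hypothetical namespace
 * reference): move a device under the rtnl lock, falling back to a
 * "dev%d" pattern if its name is taken in the destination.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, newnet, "dev%d");
 *	rtnl_unlock();
 */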
Eric W. Biedermance286d32007-09-12 13:53:49 +02006913
Linus Torvalds1da177e2005-04-16 15:20:36 -07006914static int dev_cpu_callback(struct notifier_block *nfb,
6915 unsigned long action,
6916 void *ocpu)
6917{
6918 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006919 struct sk_buff *skb;
6920 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6921 struct softnet_data *sd, *oldsd;
6922
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006923 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006924 return NOTIFY_OK;
6925
6926 local_irq_disable();
6927 cpu = smp_processor_id();
6928 sd = &per_cpu(softnet_data, cpu);
6929 oldsd = &per_cpu(softnet_data, oldcpu);
6930
6931 /* Find end of our completion_queue. */
6932 list_skb = &sd->completion_queue;
6933 while (*list_skb)
6934 list_skb = &(*list_skb)->next;
6935 /* Append completion queue from offline CPU. */
6936 *list_skb = oldsd->completion_queue;
6937 oldsd->completion_queue = NULL;
6938
Linus Torvalds1da177e2005-04-16 15:20:36 -07006939 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00006940 if (oldsd->output_queue) {
6941 *sd->output_queue_tailp = oldsd->output_queue;
6942 sd->output_queue_tailp = oldsd->output_queue_tailp;
6943 oldsd->output_queue = NULL;
6944 oldsd->output_queue_tailp = &oldsd->output_queue;
6945 }
Heiko Carstens264524d2011-06-06 20:50:03 +00006946 /* Append NAPI poll list from offline CPU. */
6947 if (!list_empty(&oldsd->poll_list)) {
6948 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6949 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6950 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006951
6952 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6953 local_irq_enable();
6954
6955 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00006956 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6957 netif_rx(skb);
6958 input_queue_head_incr(oldsd);
6959 }
Tom Herbertfec5e652010-04-16 16:01:27 -07006960 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006961 netif_rx(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00006962 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07006963 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006964
6965 return NOTIFY_OK;
6966}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006967
6968
Herbert Xu7f353bf2007-08-10 15:47:58 -07006969/**
Herbert Xub63365a2008-10-23 01:11:29 -07006970 * netdev_increment_features - increment feature set by one
6971 * @all: current feature set
6972 * @one: new feature set
6973 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07006974 *
6975 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07006976 * @one to the master device with current feature set @all. Will not
6977 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07006978 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006979netdev_features_t netdev_increment_features(netdev_features_t all,
6980 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07006981{
Michał Mirosław1742f182011-04-22 06:31:16 +00006982 if (mask & NETIF_F_GEN_CSUM)
6983 mask |= NETIF_F_ALL_CSUM;
6984 mask |= NETIF_F_VLAN_CHALLENGED;
6985
6986 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6987 all &= one | ~NETIF_F_ALL_FOR_ALL;
6988
Michał Mirosław1742f182011-04-22 06:31:16 +00006989 /* If one device supports hw checksumming, set for all. */
6990 if (all & NETIF_F_GEN_CSUM)
6991 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006992
6993 return all;
6994}
Herbert Xub63365a2008-10-23 01:11:29 -07006995EXPORT_SYMBOL(netdev_increment_features);
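
/* Usage sketch (illustrative, loosely modeled on bonding-style masters;
 * the "slaves" list and "mask" are hypothetical): start from the
 * one-for-all set and fold in each slave's features.
 *
 *	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
 *
 *	list_for_each_entry(slave, &master_priv->slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */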
Herbert Xu7f353bf2007-08-10 15:47:58 -07006996
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006997static struct hlist_head *netdev_create_hash(void)
6998{
6999 int i;
7000 struct hlist_head *hash;
7001
7002 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7003 if (hash != NULL)
7004 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7005 INIT_HLIST_HEAD(&hash[i]);
7006
7007 return hash;
7008}
7009
Eric W. Biederman881d9662007-09-17 11:56:21 -07007010/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07007011static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007012{
Rustad, Mark D734b6542012-07-18 09:06:07 +00007013 if (net != &init_net)
7014 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07007015
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007016 net->dev_name_head = netdev_create_hash();
7017 if (net->dev_name_head == NULL)
7018 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007019
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007020 net->dev_index_head = netdev_create_hash();
7021 if (net->dev_index_head == NULL)
7022 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007023
7024 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007025
7026err_idx:
7027 kfree(net->dev_name_head);
7028err_name:
7029 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007030}
7031
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007032/**
7033 * netdev_drivername - network driver for the device
7034 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007035 *
7036 * Determine network driver for device.
7037 */
David S. Miller3019de12011-06-06 16:41:33 -07007038const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07007039{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07007040 const struct device_driver *driver;
7041 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07007042 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07007043
7044 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007045 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07007046 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007047
7048 driver = parent->driver;
7049 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07007050 return driver->name;
7051 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007052}
7053
Joe Perchesb004ff42012-09-12 20:12:19 -07007054static int __netdev_printk(const char *level, const struct net_device *dev,
Joe Perches256df2f2010-06-27 01:02:35 +00007055 struct va_format *vaf)
7056{
7057 int r;
7058
Joe Perchesb004ff42012-09-12 20:12:19 -07007059 if (dev && dev->dev.parent) {
Joe Perches666f3552012-09-12 20:14:11 -07007060 r = dev_printk_emit(level[1] - '0',
7061 dev->dev.parent,
7062 "%s %s %s: %pV",
7063 dev_driver_string(dev->dev.parent),
7064 dev_name(dev->dev.parent),
7065 netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007066 } else if (dev) {
Joe Perches256df2f2010-06-27 01:02:35 +00007067 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007068 } else {
Joe Perches256df2f2010-06-27 01:02:35 +00007069 r = printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007070 }
Joe Perches256df2f2010-06-27 01:02:35 +00007071
7072 return r;
7073}
7074
7075int netdev_printk(const char *level, const struct net_device *dev,
7076 const char *format, ...)
7077{
7078 struct va_format vaf;
7079 va_list args;
7080 int r;
7081
7082 va_start(args, format);
7083
7084 vaf.fmt = format;
7085 vaf.va = &args;
7086
7087 r = __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007088
Joe Perches256df2f2010-06-27 01:02:35 +00007089 va_end(args);
7090
7091 return r;
7092}
7093EXPORT_SYMBOL(netdev_printk);
7094
7095#define define_netdev_printk_level(func, level) \
7096int func(const struct net_device *dev, const char *fmt, ...) \
7097{ \
7098 int r; \
7099 struct va_format vaf; \
7100 va_list args; \
7101 \
7102 va_start(args, fmt); \
7103 \
7104 vaf.fmt = fmt; \
7105 vaf.va = &args; \
7106 \
7107 r = __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07007108 \
Joe Perches256df2f2010-06-27 01:02:35 +00007109 va_end(args); \
7110 \
7111 return r; \
7112} \
7113EXPORT_SYMBOL(func);
7114
7115define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7116define_netdev_printk_level(netdev_alert, KERN_ALERT);
7117define_netdev_printk_level(netdev_crit, KERN_CRIT);
7118define_netdev_printk_level(netdev_err, KERN_ERR);
7119define_netdev_printk_level(netdev_warn, KERN_WARNING);
7120define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7121define_netdev_printk_level(netdev_info, KERN_INFO);
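
/* Usage sketch (illustrative): the generated helpers take the device
 * plus a printf-style format and prefix messages with the driver and
 * device names, e.g.
 *
 *	netdev_err(dev, "tx timeout on queue %d\n", txq);
 *	netdev_info(dev, "link up\n");
 */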
7122
Pavel Emelyanov46650792007-10-08 20:38:39 -07007123static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007124{
7125 kfree(net->dev_name_head);
7126 kfree(net->dev_index_head);
7127}
7128
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007129static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07007130 .init = netdev_init,
7131 .exit = netdev_exit,
7132};
7133
Pavel Emelyanov46650792007-10-08 20:38:39 -07007134static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007135{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007136 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02007137 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007138 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02007139 * initial network namespace
7140 */
7141 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007142 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007143 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007144 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02007145
 7146 /* Ignore unmovable devices (e.g. loopback) */
7147 if (dev->features & NETIF_F_NETNS_LOCAL)
7148 continue;
7149
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007150 /* Leave virtual devices for the generic cleanup */
7151 if (dev->rtnl_link_ops)
7152 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08007153
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007154 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007155 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7156 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007157 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007158 pr_emerg("%s: failed to move %s to init_net: %d\n",
7159 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007160 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02007161 }
7162 }
7163 rtnl_unlock();
7164}
7165
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007166static void __net_exit default_device_exit_batch(struct list_head *net_list)
7167{
 7168 /* At exit, all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04007169 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007170 * Do this across as many network namespaces as possible to
7171 * improve batching efficiency.
7172 */
7173 struct net_device *dev;
7174 struct net *net;
7175 LIST_HEAD(dev_kill_list);
7176
7177 rtnl_lock();
7178 list_for_each_entry(net, net_list, exit_list) {
7179 for_each_netdev_reverse(net, dev) {
7180 if (dev->rtnl_link_ops)
7181 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7182 else
7183 unregister_netdevice_queue(dev, &dev_kill_list);
7184 }
7185 }
7186 unregister_netdevice_many(&dev_kill_list);
Eric Dumazetceaaec92011-02-17 22:59:19 +00007187 list_del(&dev_kill_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007188 rtnl_unlock();
7189}
7190
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007191static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007192 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007193 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02007194};
7195
Linus Torvalds1da177e2005-04-16 15:20:36 -07007196/*
7197 * Initialize the DEV module. At boot time this walks the device list and
7198 * unhooks any devices that fail to initialise (normally hardware not
7199 * present) and leaves us with a valid list of present and active devices.
7200 *
7201 */
7202
7203/*
7204 * This is called single threaded during boot, so no need
7205 * to take the rtnl semaphore.
7206 */
7207static int __init net_dev_init(void)
7208{
7209 int i, rc = -ENOMEM;
7210
7211 BUG_ON(!dev_boot_phase);
7212
Linus Torvalds1da177e2005-04-16 15:20:36 -07007213 if (dev_proc_init())
7214 goto out;
7215
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007216 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07007217 goto out;
7218
7219 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08007220 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007221 INIT_LIST_HEAD(&ptype_base[i]);
7222
Vlad Yasevich62532da2012-11-15 08:49:10 +00007223 INIT_LIST_HEAD(&offload_base);
7224
Eric W. Biederman881d9662007-09-17 11:56:21 -07007225 if (register_pernet_subsys(&netdev_net_ops))
7226 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007227
7228 /*
7229 * Initialise the packet receive queues.
7230 */
7231
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07007232 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007233 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007234
Changli Gaodee42872010-05-02 05:42:16 +00007235 memset(sd, 0, sizeof(*sd));
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007236 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07007237 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007238 sd->completion_queue = NULL;
7239 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00007240 sd->output_queue = NULL;
7241 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00007242#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007243 sd->csd.func = rps_trigger_softirq;
7244 sd->csd.info = sd;
7245 sd->csd.flags = 0;
7246 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07007247#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00007248
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007249 sd->backlog.poll = process_backlog;
7250 sd->backlog.weight = weight_p;
7251 sd->backlog.gro_list = NULL;
7252 sd->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007253 }
7254
Linus Torvalds1da177e2005-04-16 15:20:36 -07007255 dev_boot_phase = 0;
7256
Eric W. Biederman505d4f72008-11-07 22:54:20 -08007257 /* The loopback device is special: if any other network device
 7258 * is present in a network namespace, the loopback device must
 7259 * be present too. Since we now dynamically allocate and free
 7260 * the loopback device, ensure this invariant is maintained by
 7261 * keeping the loopback device as the first device on the
 7262 * list of network devices, making it the first device that
 7263 * appears and the last network device
 7264 * that disappears.
7265 */
7266 if (register_pernet_device(&loopback_net_ops))
7267 goto out;
7268
7269 if (register_pernet_device(&default_device_ops))
7270 goto out;
7271
Carlos R. Mafra962cf362008-05-15 11:15:37 -03007272 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7273 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007274
7275 hotcpu_notifier(dev_cpu_callback, 0);
7276 dst_init();
7277 dev_mcast_init();
7278 rc = 0;
7279out:
7280 return rc;
7281}
7282
7283subsys_initcall(net_dev_init);
7284
Krishna Kumare88721f2009-02-18 17:55:02 -08007285static int __init initialize_hashrnd(void)
7286{
Tom Herbert0a9627f2010-03-16 08:03:29 +00007287 get_random_bytes(&hashrnd, sizeof(hashrnd));
Krishna Kumare88721f2009-02-18 17:55:02 -08007288 return 0;
7289}
7290
7291late_initcall_sync(initialize_hashrnd);
7292