/*
 *	NET3	Protocol independent device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/static_key.h>
#include <net/flow_keys.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, so it cannot guarantee that all CPUs
 *	in the middle of receiving packets will see the new packet type
 *	(until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

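/*
 * Illustrative sketch, not part of the original file: a minimal
 * ETH_P_ALL tap registered through dev_add_pack()/dev_remove_pack()
 * above. Because the type is ETH_P_ALL, ptype_head() files it on the
 * ptype_all list rather than in a ptype_base[] bucket. The handler and
 * variable names are hypothetical; the struct packet_type fields and
 * the registration calls come from this file and its headers.
 */
#if 0
static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt,
			   struct net_device *orig_dev)
{
	/* Taps receive their own clone of the skb; drop it when done. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_tap __read_mostly = {
	.type = cpu_to_be16(ETH_P_ALL),
	.func = example_tap_rcv,
};

/* dev_add_pack(&example_tap); ... dev_remove_pack(&example_tap); */
#endif
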

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, so it cannot guarantee that all CPUs
 *	in the middle of receiving packets will see the new offload
 *	handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

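/*
 * Illustrative sketch, not part of the original file: how a protocol
 * registers GRO callbacks with dev_add_offload() above, modelled on the
 * IPv4 registration in net/ipv4/af_inet.c. The two handler names are
 * hypothetical (declared but not defined here); the .type and
 * .callbacks fields are assumed to match struct packet_offload in
 * <linux/netdevice.h> of this kernel.
 */
#if 0
static struct sk_buff **example_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb);
static int example_gro_complete(struct sk_buff *skb);

static struct packet_offload example_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.callbacks = {
		.gro_receive = example_gro_receive,
		.gro_complete = example_gro_complete,
	},
};

/* dev_add_offload(&example_offload); ... dev_remove_offload(&example_offload); */
#endif
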
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

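/*
 * Illustrative usage, not part of the original file: with the parser
 * above, a kernel command line entry of the form
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * fills struct ifmap with irq=9, base_addr=0x300, mem_start=0xd0000 and
 * mem_end=0xd4000 for "eth0"; get_options() consumes the leading
 * integers and leaves @str pointing at the device name. The specific
 * values here are hypothetical.
 */
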
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

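/*
 * Illustrative sketch, not part of the original file, contrasting the
 * two lookup styles defined above: an RCU-protected lookup that takes
 * no reference versus dev_get_by_name(), which returns a held device
 * that must be released with dev_put(). "eth0" is just an example name.
 */
#if 0
static void example_name_lookups(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, "eth0");
	/* dev is only valid inside this RCU read-side section */
	rcu_read_unlock();

	dev = dev_get_by_name(net, "eth0");
	if (dev) {
		/* refcount held: dev stays valid until dev_put() */
		dev_put(dev);
	}
}
#endif
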
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

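/*
 * Illustrative sketch, not part of the original file: looking up an
 * Ethernet device by MAC with dev_getbyhwaddr_rcu() above. ARPHRD_ETHER
 * is the matching dev->type for Ethernet; the address is hypothetical.
 */
#if 0
static void example_hwaddr_lookup(struct net *net)
{
	static const char mac[ETH_ALEN] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
	/* no reference taken: use dev only under rcu_read_lock() */
	rcu_read_unlock();
}
#endif
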
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

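/*
 * Worked examples for the checks above (illustrative, not part of the
 * original file):
 *
 *	dev_valid_name("eth0")    -> true
 *	dev_valid_name("")        -> false  (empty string)
 *	dev_valid_name(".")       -> false  (reserved, would break sysfs)
 *	dev_valid_name("my dev")  -> false  (whitespace)
 *	dev_valid_name("a/b")     -> false  (not a valid file name)
 */
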
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

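/*
 * Illustrative sketch, not part of the original file: a driver-style
 * use of dev_alloc_name() above, called with rtnl held. With the format
 * "eth%d" the function picks the first free unit, so on a machine that
 * already has eth0 and eth1 it writes "eth2" into dev->name and
 * returns 2. The wrapper function is hypothetical.
 */
#if 0
static int example_assign_name(struct net_device *dev)
{
	int err;

	err = dev_alloc_name(dev, "eth%d");	/* e.g. returns 2, name "eth2" */
	if (err < 0)
		return err;	/* -EINVAL, -ENOMEM or -ENFILE */
	return 0;
}
#endif
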
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
1157 */
1158int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1159{
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001160 char *new_ifalias;
1161
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001162 ASSERT_RTNL();
1163
1164 if (len >= IFALIASZ)
1165 return -EINVAL;
1166
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001167 if (!len) {
Sachin Kamat388dfc22012-11-20 00:57:04 +00001168 kfree(dev->ifalias);
1169 dev->ifalias = NULL;
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001170 return 0;
1171 }
1172
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001173 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1174 if (!new_ifalias)
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001175 return -ENOMEM;
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001176 dev->ifalias = new_ifalias;
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001177
1178 strlcpy(dev->ifalias, alias, len+1);
1179 return len;
1180}
1181
1182
1183/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001184 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001185 * @dev: device to cause notification
1186 *
1187 * Called to indicate a device has changed features.
1188 */
1189void netdev_features_change(struct net_device *dev)
1190{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001191 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001192}
1193EXPORT_SYMBOL(netdev_features_change);
1194
1195/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196 * netdev_state_change - device changes state
1197 * @dev: device to cause notification
1198 *
1199 * Called to indicate a device has changed state. This function calls
1200 * the notifier chains for netdev_chain and sends a NEWLINK message
1201 * to the routing socket.
1202 */
1203void netdev_state_change(struct net_device *dev)
1204{
1205 if (dev->flags & IFF_UP) {
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001206 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1208 }
1209}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001210EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211
Amerigo Wangee89bab2012-08-09 22:14:56 +00001212/**
1213 * netdev_notify_peers - notify network peers about existence of @dev
1214 * @dev: network device
1215 *
1216 * Generate traffic such that interested network peers are aware of
1217 * @dev, such as by generating a gratuitous ARP. This may be used when
1218 * a device wants to inform the rest of the network about some sort of
1219 * reconfiguration such as a failover event or virtual machine
1220 * migration.
1221 */
1222void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001223{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001224 rtnl_lock();
1225 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1226 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001227}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001228EXPORT_SYMBOL(netdev_notify_peers);

/**
 * dev_load - load a network module
 * @net: the applicable net namespace
 * @name: name of interface
 *
 * If a network interface is not present and the process has suitable
 * privileges, this function loads the module. If module loading is not
 * available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE)) {
		if (!request_module("%s", name))
			pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
				name);
	}
}
EXPORT_SYMBOL(dev_load);
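
/*
 * Usage sketch (illustrative, not part of the original file): the
 * ioctl path uses this to auto-load a driver module before looking
 * the device up by name:
 *
 *	dev_load(net, ifr.ifr_name);
 *	rtnl_lock();
 *	dev = __dev_get_by_name(net, ifr.ifr_name);
 *
 * Modules provide a "netdev-<name>" alias so that the capability
 * check can stay at CAP_NET_ADMIN.
 */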

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
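
/*
 * Usage sketch (illustrative, not part of the original file):
 * bringing an interface up or down from kernel code mirrors what
 * "ip link set dev eth0 up" does and requires the RTNL:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	...
 *	dev_close(dev);
 *	rtnl_unlock();
 */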

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch the poll
		 * list; it can even be on a different CPU, so just clear
		 * netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device-specific close. This cannot fail and is
		 * only done if the device is UP.
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the
 * notifier chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);

/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device. Must be
 * called under RTNL. This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable LRO on a VLAN device,
	 * use the underlying physical device instead.
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);
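
/*
 * Usage sketch (illustrative, not part of the original file): code
 * that starts forwarding packets received on a device, e.g. when
 * enslaving it to a bridge, must not let the device aggregate
 * receives:
 *
 *	ASSERT_RTNL();
 *	dev_disable_lro(dev);
 */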

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier to allow the device to have a race-free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
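
/*
 * Usage sketch (illustrative, not part of the original file): a
 * minimal subscriber; the names are hypothetical. In this kernel the
 * notifier's third argument is the struct net_device itself:
 *
 *	static int sketch_netdev_event(struct notifier_block *nb,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block sketch_netdev_nb = {
 *		.notifier_call = sketch_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&sketch_netdev_nb);
 */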

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are
 * synthesized for all devices on the device list to the removed
 * notifier, removing the need for special-case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	WARN_ON(in_interrupt());
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
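
/*
 * Usage sketch (illustrative, not part of the original file): users
 * such as socket timestamping keep the static key balanced, one
 * disable per successful enable:
 *
 *	net_enable_timestamp();
 *	...	the receive path now stamps skbs in net_timestamp_set()
 *	net_disable_timestamp();
 */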

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp.tv64)		\
			__net_timestamp(SKB);			\
	}							\

static int net_hwtstamp_validate(struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	enum hwtstamp_tx_types tx_type;
	enum hwtstamp_rx_filters rx_filter;
	int tx_type_valid = 0;
	int rx_filter_valid = 0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	tx_type = cfg.tx_type;
	rx_filter = cfg.rx_filter;

	switch (tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		tx_type_valid = 1;
		break;
	}

	switch (rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		rx_filter_valid = 1;
		break;
	}

	if (!tx_type_valid || !rx_filter_valid)
		return -ERANGE;

	return 0;
}

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* If TSO is enabled, we don't care about the length, as the packet
	 * could be forwarded without being segmented beforehand.
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->skb_iif = 0;
	skb->dev = dev;
	skb_dst_drop(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
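
/*
 * Usage sketch (illustrative, not part of the original file): a pair
 * device such as veth hands frames to its peer from ndo_start_xmit();
 * "sketch_get_peer" is a hypothetical helper:
 *
 *	static netdev_tx_t sketch_xmit(struct sk_buff *skb,
 *				       struct net_device *dev)
 *	{
 *		struct net_device *peer = sketch_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */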

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 * Support routine. Sends outgoing frames to any network
 * taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* The network header should already be set correctly
			 * by the sender; the check below is just protection
			 * against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this, verify that each tc mapping remains valid
 * and, if not, NULL the mapping. With no priorities mapping to an
 * offset/count pair, that pair will no longer be used. In the worst
 * case, TC0 is invalid and nothing can be done, so priority mappings
 * are disabled entirely. It is expected that drivers will fix this
 * mapping if they can before calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			/* Don't leave with the mutex held on failure. */
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);
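
/*
 * Usage sketch (illustrative, not part of the original file): a
 * multiqueue driver can pin TX queue "i" to the CPUs expected to
 * service it, e.g. one queue per CPU:
 *
 *	cpumask_clear(&mask);
 *	cpumask_set_cpu(i, &mask);
 *	netif_set_xps_queue(dev, &mask, i);
 *
 * This is the in-kernel equivalent of writing a CPU mask to
 * /sys/class/net/<dev>/queues/tx-<i>/xps_cpus.
 */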

#endif
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues going stale, the skbs on those qdiscs
 * must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
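
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * registered with its maximum queue count can shrink to what the
 * hardware actually enabled; "hw_txqs"/"hw_rxqs" are hypothetical.
 * Both calls below need the RTNL unless made before registration:
 *
 *	err = netif_set_real_num_tx_queues(dev, hw_txqs);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, hw_rxqs);
 */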

#ifdef CONFIG_RPS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
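
/*
 * Usage sketch (illustrative, not part of the original file): a TX
 * completion handler that may run in hardirq context, or with IRQs
 * disabled, frees the sent skb with the _any variant:
 *
 *	dev_kfree_skb_any(tx_ring->skb);	("tx_ring" is hypothetical)
 *
 * Plain dev_kfree_skb() would be illegal there, since skb destructors
 * must not run in hardirq context; dev_kfree_skb_irq() instead defers
 * the free to the NET_TX softirq via the per-CPU completion queue.
 */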

/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
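
/*
 * Usage sketch (illustrative, not part of the original file): drivers
 * pair detach/attach across suspend and resume so the stack stops
 * handing them packets while the hardware is powered down:
 *
 *	suspend:	netif_device_detach(dev);
 *	resume:		netif_device_attach(dev);
 */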

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
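
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * whose hardware cannot checksum a particular packet falls back to
 * software before queueing it; "sketch_hw_can_csum" is hypothetical:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !sketch_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */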

/**
 * skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation. This is
 * only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;
	int err;

	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return ERR_PTR(-EINVAL);

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		skb_warn_bad_offload(skb);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->callbacks.gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
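
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * walk the returned singly-linked list via skb->next and must check
 * for both an error pointer and NULL; "sketch_xmit_one" is
 * hypothetical:
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	while (segs) {
 *		struct sk_buff *next = segs->next;
 *
 *		segs->next = NULL;
 *		sketch_xmit_one(segs);
 *		segs = next;
 *	}
 */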

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. An IOMMU is present and allows mapping of all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 * dev_gso_segment - Perform emulated hardware segmentation on skb.
 * @skb: buffer to segment
 * @features: device features as applicable to this skb
 *
 * This function segments the given skb and stores the list of segments
 * in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static netdev_features_t harmonize_features(struct sk_buff *skb,
	__be16 protocol, netdev_features_t features)
{
	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, protocol)) {
		features &= ~NETIF_F_ALL_CSUM;
		features &= ~NETIF_F_SG;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	netdev_features_t features = skb->dev->features;

	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
		features &= ~NETIF_F_GSO_MASK;

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, protocol, features);
	}

	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);

	if (protocol != htons(ETH_P_8021Q)) {
		return harmonize_features(skb, protocol, features);
	} else {
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
			    NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
		return harmonize_features(skb, protocol, features);
	}
}
EXPORT_SYMBOL(netif_skb_features);

/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      int features)
{
	return skb_is_nonlinear(skb) &&
			((skb_has_frag_list(skb) &&
				!(features & NETIF_F_FRAGLIST)) ||
			(skb_shinfo(skb)->nr_frags &&
				!(features & NETIF_F_SG)));
}
2524
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002525int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2526 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002527{
Stephen Hemminger00829822008-11-20 20:14:53 -08002528 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00002529 int rc = NETDEV_TX_OK;
Koki Sanagiec764bf2011-05-30 21:48:34 +00002530 unsigned int skb_len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002531
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002532 if (likely(!skb->next)) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002533 netdev_features_t features;
Jesse Grossfc741212011-01-09 06:23:32 +00002534
Eric Dumazet93f154b2009-05-18 22:19:19 -07002535 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002536 * If device doesn't need skb->dst, release it right now while
Eric Dumazet93f154b2009-05-18 22:19:19 -07002537 * its hot in this cpu cache
2538 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002539 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2540 skb_dst_drop(skb);
2541
Jesse Grossfc741212011-01-09 06:23:32 +00002542 features = netif_skb_features(skb);
2543
Jesse Gross7b9c6092010-10-20 13:56:04 +00002544 if (vlan_tx_tag_present(skb) &&
Jesse Grossfc741212011-01-09 06:23:32 +00002545 !(features & NETIF_F_HW_VLAN_TX)) {
Jesse Gross7b9c6092010-10-20 13:56:04 +00002546 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2547 if (unlikely(!skb))
2548 goto out;
2549
2550 skb->vlan_tci = 0;
2551 }
2552
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002553 /* If encapsulation offload request, verify we are testing
2554 * hardware encapsulation features instead of standard
2555 * features for the netdev
2556 */
2557 if (skb->encapsulation)
2558 features &= dev->hw_enc_features;
2559
Jesse Grossfc741212011-01-09 06:23:32 +00002560 if (netif_needs_gso(skb, features)) {
Jesse Gross91ecb632011-01-09 06:23:33 +00002561 if (unlikely(dev_gso_segment(skb, features)))
David S. Miller9ccb8972010-04-22 01:02:07 -07002562 goto out_kfree_skb;
2563 if (skb->next)
2564 goto gso;
John Fastabend6afff0c2010-06-16 14:18:12 +00002565 } else {
Jesse Gross02932ce2011-01-09 06:23:34 +00002566 if (skb_needs_linearize(skb, features) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002567 __skb_linearize(skb))
2568 goto out_kfree_skb;
2569
2570			/* If the packet is not checksummed and the device does not
2571 * support checksumming for this protocol, complete
2572 * checksumming here.
2573 */
2574 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002575 if (skb->encapsulation)
2576 skb_set_inner_transport_header(skb,
2577 skb_checksum_start_offset(skb));
2578 else
2579 skb_set_transport_header(skb,
2580 skb_checksum_start_offset(skb));
Jesse Gross03634662011-01-09 06:23:35 +00002581 if (!(features & NETIF_F_ALL_CSUM) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002582 skb_checksum_help(skb))
2583 goto out_kfree_skb;
2584 }
David S. Miller9ccb8972010-04-22 01:02:07 -07002585 }
2586
Eric Dumazetb40863c2012-09-18 20:44:49 +00002587 if (!list_empty(&ptype_all))
2588 dev_queue_xmit_nit(skb, dev);
2589
Koki Sanagiec764bf2011-05-30 21:48:34 +00002590 skb_len = skb->len;
Patrick Ohlyac45f602009-02-12 05:03:37 +00002591 rc = ops->ndo_start_xmit(skb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002592 trace_net_dev_xmit(skb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002593 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07002594 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00002595 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002596 }
2597
Herbert Xu576a30e2006-06-27 13:22:38 -07002598gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002599 do {
2600 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002601
2602 skb->next = nskb->next;
2603 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00002604
2605 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002606		 * If the device doesn't need nskb->dst, release it right now while
Krishna Kumar068a2de2009-12-09 20:59:58 +00002607		 * it's hot in this CPU's cache
2608 */
2609 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2610 skb_dst_drop(nskb);
2611
Eric Dumazetb40863c2012-09-18 20:44:49 +00002612 if (!list_empty(&ptype_all))
2613 dev_queue_xmit_nit(nskb, dev);
2614
Koki Sanagiec764bf2011-05-30 21:48:34 +00002615 skb_len = nskb->len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002616 rc = ops->ndo_start_xmit(nskb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002617 trace_net_dev_xmit(nskb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002618 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002619 if (rc & ~NETDEV_TX_MASK)
2620 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07002621 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002622 skb->next = nskb;
2623 return rc;
2624 }
Eric Dumazet08baf562009-05-25 22:58:01 -07002625 txq_trans_update(txq);
Tom Herbert734664982011-11-28 16:32:44 +00002626 if (unlikely(netif_xmit_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07002627 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002628 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002629
Patrick McHardy572a9d72009-11-10 06:14:14 +00002630out_kfree_gso_skb:
2631 if (likely(skb->next == NULL))
2632 skb->destructor = DEV_GSO_CB(skb)->destructor;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002633out_kfree_skb:
2634 kfree_skb(skb);
Jesse Gross7b9c6092010-10-20 13:56:04 +00002635out:
Patrick McHardy572a9d72009-11-10 06:14:14 +00002636 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002637}
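/* Note on the return value (as used by dev_queue_xmit() below): rc is
 * either a NETDEV_TX_* code or a positive queuing error.  Callers test
 * it with dev_xmit_complete(); only NETDEV_TX_BUSY means the skb is
 * still owned by the caller and may be retried.
 */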
2638
Tom Herbert0a9627f2010-03-16 08:03:29 +00002639static u32 hashrnd __read_mostly;
David S. Millerb6b2fed2008-07-21 09:48:06 -07002640
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002641/*
2642 * Returns a Tx hash based on the given packet descriptor and the number of
2643 * Tx queues to be used as a distribution range.
2644 */
2645u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2646 unsigned int num_tx_queues)
David S. Miller8f0f2222008-07-15 03:47:03 -07002647{
David S. Miller70192982009-01-27 16:34:47 -08002648 u32 hash;
John Fastabend4f57c082011-01-17 08:06:04 +00002649 u16 qoffset = 0;
2650 u16 qcount = num_tx_queues;
David S. Millerb6b2fed2008-07-21 09:48:06 -07002651
David S. Miller513de112009-05-03 14:43:10 -07002652 if (skb_rx_queue_recorded(skb)) {
2653 hash = skb_get_rx_queue(skb);
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002654 while (unlikely(hash >= num_tx_queues))
2655 hash -= num_tx_queues;
David S. Miller513de112009-05-03 14:43:10 -07002656 return hash;
2657 }
Eric Dumazetec581f62009-05-01 09:05:06 -07002658
John Fastabend4f57c082011-01-17 08:06:04 +00002659 if (dev->num_tc) {
2660 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2661 qoffset = dev->tc_to_txq[tc].offset;
2662 qcount = dev->tc_to_txq[tc].count;
2663 }
2664
Eric Dumazetec581f62009-05-01 09:05:06 -07002665 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08002666 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07002667 else
Eric Dumazet62b1a8a2012-06-14 06:42:44 +00002668 hash = (__force u16) skb->protocol;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002669 hash = jhash_1word(hash, hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08002670
John Fastabend4f57c082011-01-17 08:06:04 +00002671 return (u16) (((u64) hash * qcount) >> 32) + qoffset;
David S. Miller8f0f2222008-07-15 03:47:03 -07002672}
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002673EXPORT_SYMBOL(__skb_tx_hash);
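/* A minimal sketch (illustrative only, not compiled) of the
 * multiply-shift step used above: it maps a 32-bit hash uniformly onto
 * [0, qcount) without a modulo.
 */
#if 0
static u16 example_scale_hash(u32 hash, u16 qcount)
{
	/* ((u64)hash * qcount) >> 32 always lands in [0, qcount) */
	return (u16)(((u64)hash * qcount) >> 32);
}
/* e.g. example_scale_hash(0x80000000, 8) == 4 and
 *      example_scale_hash(0xffffffff, 8) == 7
 */
#endif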
David S. Miller8f0f2222008-07-15 03:47:03 -07002674
Eric Dumazeted046422009-11-13 21:54:04 +00002675static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2676{
2677 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
Joe Perchese87cc472012-05-13 21:56:26 +00002678 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2679 dev->name, queue_index,
2680 dev->real_num_tx_queues);
Eric Dumazeted046422009-11-13 21:54:04 +00002681 return 0;
2682 }
2683 return queue_index;
2684}
2685
Tom Herbert1d24eb42010-11-21 13:17:27 +00002686static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2687{
Tom Herbertbf264142010-11-26 08:36:09 +00002688#ifdef CONFIG_XPS
Tom Herbert1d24eb42010-11-21 13:17:27 +00002689 struct xps_dev_maps *dev_maps;
2690 struct xps_map *map;
2691 int queue_index = -1;
2692
2693 rcu_read_lock();
2694 dev_maps = rcu_dereference(dev->xps_maps);
2695 if (dev_maps) {
2696 map = rcu_dereference(
2697 dev_maps->cpu_map[raw_smp_processor_id()]);
2698 if (map) {
2699 if (map->len == 1)
2700 queue_index = map->queues[0];
2701 else {
2702 u32 hash;
2703 if (skb->sk && skb->sk->sk_hash)
2704 hash = skb->sk->sk_hash;
2705 else
2706 hash = (__force u16) skb->protocol ^
2707 skb->rxhash;
2708 hash = jhash_1word(hash, hashrnd);
2709 queue_index = map->queues[
2710 ((u64)hash * map->len) >> 32];
2711 }
2712 if (unlikely(queue_index >= dev->real_num_tx_queues))
2713 queue_index = -1;
2714 }
2715 }
2716 rcu_read_unlock();
2717
2718 return queue_index;
2719#else
2720 return -1;
2721#endif
2722}
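/* Illustration (an assumed configuration, not taken from this file):
 * after
 *	echo c > /sys/class/net/eth0/queues/tx-1/xps_cpus
 * CPUs 2 and 3 map to tx queue 1, so a sender running on CPU 2 gets
 * queue_index 1 from the cpu_map lookup above.
 */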
2723
Alexander Duyck416186f2013-01-10 08:56:51 +00002724u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
2725{
2726 struct sock *sk = skb->sk;
2727 int queue_index = sk_tx_queue_get(sk);
2728
2729 if (queue_index < 0 || skb->ooo_okay ||
2730 queue_index >= dev->real_num_tx_queues) {
2731 int new_index = get_xps_queue(dev, skb);
2732 if (new_index < 0)
2733 new_index = skb_tx_hash(dev, skb);
2734
2735 if (queue_index != new_index && sk) {
2736 struct dst_entry *dst =
2737 rcu_dereference_check(sk->sk_dst_cache, 1);
2738
2739 if (dst && skb_dst(skb) == dst)
2740 sk_tx_queue_set(sk, queue_index);
2741
2742 }
2743
2744 queue_index = new_index;
2745 }
2746
2747 return queue_index;
2748}
Alexander Duyck87696f92013-01-11 10:38:42 -08002749EXPORT_SYMBOL(__netdev_pick_tx);
Alexander Duyck416186f2013-01-10 08:56:51 +00002750
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002751struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2752 struct sk_buff *skb)
David S. Millere8a04642008-07-17 00:34:19 -07002753{
Alexander Duyck416186f2013-01-10 08:56:51 +00002754 int queue_index = 0;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002755
Alexander Duyck416186f2013-01-10 08:56:51 +00002756 if (dev->real_num_tx_queues != 1) {
2757 const struct net_device_ops *ops = dev->netdev_ops;
2758 if (ops->ndo_select_queue)
2759 queue_index = ops->ndo_select_queue(dev, skb);
2760 else
2761 queue_index = __netdev_pick_tx(dev, skb);
Helmut Schaadeabc772010-09-03 02:39:56 +00002762 queue_index = dev_cap_txqueue(dev, queue_index);
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00002763 }
David S. Millereae792b2008-07-15 03:03:33 -07002764
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002765 skb_set_queue_mapping(skb, queue_index);
2766 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07002767}
2768
Eric Dumazet1def9232013-01-10 12:36:42 +00002769static void qdisc_pkt_len_init(struct sk_buff *skb)
2770{
2771 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2772
2773 qdisc_skb_cb(skb)->pkt_len = skb->len;
2774
2775	/* To get a more precise estimate of the bytes sent on the wire,
2776	 * we add to pkt_len the header size of each segment after the first
2777 */
2778 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08002779 unsigned int hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00002780
Eric Dumazet757b8b12013-01-15 21:14:21 -08002781 /* mac layer + network layer */
2782 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2783
2784 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00002785 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2786 hdr_len += tcp_hdrlen(skb);
2787 else
2788 hdr_len += sizeof(struct udphdr);
2789 qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len;
2790 }
2791}
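/* Worked example of the estimate above (the numbers are assumptions):
 * a TCP GSO skb with shinfo->gso_segs == 4 and 14 (MAC) + 20 (IPv4) +
 * 32 (TCP with timestamps) = 66 header bytes ends up with
 *	pkt_len = skb->len + (4 - 1) * 66
 * which better matches the bytes on the wire once the hardware has
 * segmented the skb.
 */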
2792
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002793static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2794 struct net_device *dev,
2795 struct netdev_queue *txq)
2796{
2797 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002798 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002799 int rc;
2800
Eric Dumazet1def9232013-01-10 12:36:42 +00002801 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002802 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002803 /*
2804 * Heuristic to force contended enqueues to serialize on a
2805	 * separate lock before trying to get the qdisc main lock.
2806	 * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
2807 * and dequeue packets faster.
2808 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002809 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002810 if (unlikely(contended))
2811 spin_lock(&q->busylock);
2812
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002813 spin_lock(root_lock);
2814 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2815 kfree_skb(skb);
2816 rc = NET_XMIT_DROP;
2817 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002818 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002819 /*
2820 * This is a work-conserving queue; there are no old skbs
2821 * waiting to be sent out; and the qdisc is not running -
2822 * xmit the skb directly.
2823 */
Eric Dumazet7fee2262010-05-11 23:19:48 +00002824 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2825 skb_dst_force(skb);
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002826
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002827 qdisc_bstats_update(q, skb);
2828
Eric Dumazet79640a42010-06-02 05:09:29 -07002829 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2830 if (unlikely(contended)) {
2831 spin_unlock(&q->busylock);
2832 contended = false;
2833 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002834 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002835 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002836 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002837
2838 rc = NET_XMIT_SUCCESS;
2839 } else {
Eric Dumazet7fee2262010-05-11 23:19:48 +00002840 skb_dst_force(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002841 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002842 if (qdisc_run_begin(q)) {
2843 if (unlikely(contended)) {
2844 spin_unlock(&q->busylock);
2845 contended = false;
2846 }
2847 __qdisc_run(q);
2848 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002849 }
2850 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002851 if (unlikely(contended))
2852 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002853 return rc;
2854}
2855
Neil Horman5bc14212011-11-22 05:10:51 +00002856#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2857static void skb_update_prio(struct sk_buff *skb)
2858{
Igor Maravic6977a792011-11-25 07:44:54 +00002859 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002860
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002861 if (!skb->priority && skb->sk && map) {
2862 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2863
2864 if (prioidx < map->priomap_len)
2865 skb->priority = map->priomap[prioidx];
2866 }
Neil Horman5bc14212011-11-22 05:10:51 +00002867}
2868#else
2869#define skb_update_prio(skb)
2870#endif
2871
Eric Dumazet745e20f2010-09-29 13:23:09 -07002872static DEFINE_PER_CPU(int, xmit_recursion);
David S. Miller11a766c2010-10-25 12:51:55 -07002873#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002874
Dave Jonesd29f7492008-07-22 14:09:06 -07002875/**
Michel Machado95603e22012-06-12 10:16:35 +00002876 * dev_loopback_xmit - loop back @skb
2877 * @skb: buffer to transmit
2878 */
2879int dev_loopback_xmit(struct sk_buff *skb)
2880{
2881 skb_reset_mac_header(skb);
2882 __skb_pull(skb, skb_network_offset(skb));
2883 skb->pkt_type = PACKET_LOOPBACK;
2884 skb->ip_summed = CHECKSUM_UNNECESSARY;
2885 WARN_ON(!skb_dst(skb));
2886 skb_dst_force(skb);
2887 netif_rx_ni(skb);
2888 return 0;
2889}
2890EXPORT_SYMBOL(dev_loopback_xmit);
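/* Caller sketch (not compiled): IP multicast loopback, for instance,
 * clones the skb and loops the clone back to the local stack.  The dst
 * must already be set, hence the WARN_ON above; example_loopback_copy
 * is an invented name.
 */
#if 0
static void example_loopback_copy(struct sk_buff *skb)
{
	struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);

	if (copy)
		dev_loopback_xmit(copy);	/* re-enters RX via netif_rx_ni() */
}
#endif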
2891
2892/**
Dave Jonesd29f7492008-07-22 14:09:06 -07002893 * dev_queue_xmit - transmit a buffer
2894 * @skb: buffer to transmit
2895 *
2896 * Queue a buffer for transmission to a network device. The caller must
2897 * have set the device and priority and built the buffer before calling
2898 * this function. The function can be called from an interrupt.
2899 *
2900 * A negative errno code is returned on a failure. A success does not
2901 * guarantee the frame will be transmitted as it may be dropped due
2902 * to congestion or traffic shaping.
2903 *
2904 * -----------------------------------------------------------------------------------
2905 * I notice this method can also return errors from the queue disciplines,
2906 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2907 * be positive.
2908 *
2909 * Regardless of the return value, the skb is consumed, so it is currently
2910 * difficult to retry a send to this method. (You can bump the ref count
2911 * before sending to hold a reference for retry if you are careful.)
2912 *
2913 * When calling this method, interrupts MUST be enabled. This is because
2914 * the BH enable code must have IRQs enabled so that it will not deadlock.
2915 * --BLG
2916 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917int dev_queue_xmit(struct sk_buff *skb)
2918{
2919 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002920 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 struct Qdisc *q;
2922 int rc = -ENOMEM;
2923
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002924 /* Disable soft irqs for various locks below. Also
2925 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002927 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928
Neil Horman5bc14212011-11-22 05:10:51 +00002929 skb_update_prio(skb);
2930
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002931 txq = netdev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002932 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002933
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002935 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002937 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002939 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002940 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941 }
2942
2943 /* The device has no queue. Common case for software devices:
2944	   loopback, all sorts of tunnels...
2945
Herbert Xu932ff272006-06-09 12:20:56 -07002946	   Really, it is unlikely that netif_tx_lock protection is necessary
2947	   here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948	   counters.)
2949	   However, it is possible that they rely on the protection
2950	   made by us here.
2951
2952	   Check this and shoot the lock. It is not prone to deadlocks.
2953	   Shooting the noqueue qdisc is even simpler 8)
2954 */
2955 if (dev->flags & IFF_UP) {
2956 int cpu = smp_processor_id(); /* ok because BHs are off */
2957
David S. Millerc773e842008-07-08 23:13:53 -07002958 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959
Eric Dumazet745e20f2010-09-29 13:23:09 -07002960 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2961 goto recursion_alert;
2962
David S. Millerc773e842008-07-08 23:13:53 -07002963 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964
Tom Herbert734664982011-11-28 16:32:44 +00002965 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002966 __this_cpu_inc(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002967 rc = dev_hard_start_xmit(skb, dev, txq);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002968 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002969 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002970 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 goto out;
2972 }
2973 }
David S. Millerc773e842008-07-08 23:13:53 -07002974 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00002975 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2976 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977 } else {
2978			/* Recursion detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07002979			 * unfortunately.
2980 */
2981recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00002982 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2983 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984 }
2985 }
2986
2987 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002988 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002989
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 kfree_skb(skb);
2991 return rc;
2992out:
Herbert Xud4828d82006-06-22 02:28:18 -07002993 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 return rc;
2995}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002996EXPORT_SYMBOL(dev_queue_xmit);
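/* Usage sketch (not compiled) of the caller contract documented above;
 * example_xmit() is an invented name.
 */
#if 0
static int example_xmit(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;		/* the caller sets the device ... */
	skb->priority = 0;	/* ... and the priority first */

	/* The skb is consumed whatever the outcome; never touch it
	 * again, and note that errors such as NET_XMIT_DROP are > 0.
	 */
	return dev_queue_xmit(skb);
}
#endif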
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997
2998
2999/*=======================================================================
3000 Receiver routines
3001 =======================================================================*/
3002
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003003int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00003004EXPORT_SYMBOL(netdev_max_backlog);
3005
Eric Dumazet3b098e22010-05-15 23:57:10 -07003006int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07003007int netdev_budget __read_mostly = 300;
3008int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003010/* Called with irq disabled */
3011static inline void ____napi_schedule(struct softnet_data *sd,
3012 struct napi_struct *napi)
3013{
3014 list_add_tail(&napi->poll_list, &sd->poll_list);
3015 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3016}
3017
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003018/*
3019 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
Tom Herbertbdeab992011-08-14 19:45:55 +00003020 * and src/dst port numbers. Sets rxhash in the skb to a non-zero hash value
3021 * on success; zero indicates no valid hash. Also sets l4_rxhash in the skb
3022 * if the hash is a canonical 4-tuple hash over transport ports.
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003023 */
Tom Herbertbdeab992011-08-14 19:45:55 +00003024void __skb_get_rxhash(struct sk_buff *skb)
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003025{
Eric Dumazet4504b862011-11-28 05:23:23 +00003026 struct flow_keys keys;
3027 u32 hash;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003028
Eric Dumazet4504b862011-11-28 05:23:23 +00003029 if (!skb_flow_dissect(skb, &keys))
3030 return;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003031
Chema Gonzalez68622342012-09-07 13:40:50 +00003032 if (keys.ports)
Eric Dumazet4504b862011-11-28 05:23:23 +00003033 skb->l4_rxhash = 1;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003034
3035 /* get a consistent hash (same value on both flow directions) */
Chema Gonzalez68622342012-09-07 13:40:50 +00003036 if (((__force u32)keys.dst < (__force u32)keys.src) ||
3037 (((__force u32)keys.dst == (__force u32)keys.src) &&
3038 ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
Eric Dumazet4504b862011-11-28 05:23:23 +00003039 swap(keys.dst, keys.src);
Chema Gonzalez68622342012-09-07 13:40:50 +00003040 swap(keys.port16[0], keys.port16[1]);
3041 }
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003042
Eric Dumazet4504b862011-11-28 05:23:23 +00003043 hash = jhash_3words((__force u32)keys.dst,
3044 (__force u32)keys.src,
3045 (__force u32)keys.ports, hashrnd);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003046 if (!hash)
3047 hash = 1;
3048
Tom Herbertbdeab992011-08-14 19:45:55 +00003049 skb->rxhash = hash;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003050}
3051EXPORT_SYMBOL(__skb_get_rxhash);
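/* Example of the direction-independent ordering above (addresses made
 * up): for 10.0.0.2:5000 -> 10.0.0.1:80, keys.dst < keys.src, so the
 * addresses and ports are swapped before hashing; the reply
 * 10.0.0.1:80 -> 10.0.0.2:5000 needs no swap and feeds the identical
 * tuple to jhash_3words(), giving both directions the same rxhash.
 */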
3052
Eric Dumazetdf334542010-03-24 19:13:54 +00003053#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07003054
3055/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003056struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07003057EXPORT_SYMBOL(rps_sock_flow_table);
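/* Sizing note (configuration detail, not code in this file): the table
 * is allocated when the net.core.rps_sock_flow_entries sysctl is set;
 * RFS stays inactive until both that and a queue's rps_flow_cnt in
 * /sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt are non-zero.
 */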
3058
Ingo Molnarc5905af2012-02-24 08:31:31 +01003059struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00003060
Ben Hutchingsc4454772011-01-19 11:03:53 +00003061static struct rps_dev_flow *
3062set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3063 struct rps_dev_flow *rflow, u16 next_cpu)
3064{
Ben Hutchings09994d12011-10-03 04:42:46 +00003065 if (next_cpu != RPS_NO_CPU) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00003066#ifdef CONFIG_RFS_ACCEL
3067 struct netdev_rx_queue *rxqueue;
3068 struct rps_dev_flow_table *flow_table;
3069 struct rps_dev_flow *old_rflow;
3070 u32 flow_id;
3071 u16 rxq_index;
3072 int rc;
3073
3074 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00003075 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3076 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00003077 goto out;
3078 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3079 if (rxq_index == skb_get_rx_queue(skb))
3080 goto out;
3081
3082 rxqueue = dev->_rx + rxq_index;
3083 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3084 if (!flow_table)
3085 goto out;
3086 flow_id = skb->rxhash & flow_table->mask;
3087 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3088 rxq_index, flow_id);
3089 if (rc < 0)
3090 goto out;
3091 old_rflow = rflow;
3092 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00003093 rflow->filter = rc;
3094 if (old_rflow->filter == rflow->filter)
3095 old_rflow->filter = RPS_NO_FILTER;
3096 out:
3097#endif
3098 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00003099 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003100 }
3101
Ben Hutchings09994d12011-10-03 04:42:46 +00003102 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003103 return rflow;
3104}
3105
Tom Herbert0a9627f2010-03-16 08:03:29 +00003106/*
3107 * get_rps_cpu is called from netif_receive_skb and returns the target
3108 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003109 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00003110 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003111static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3112 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003113{
Tom Herbert0a9627f2010-03-16 08:03:29 +00003114 struct netdev_rx_queue *rxqueue;
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003115 struct rps_map *map;
Tom Herbertfec5e652010-04-16 16:01:27 -07003116 struct rps_dev_flow_table *flow_table;
3117 struct rps_sock_flow_table *sock_flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003118 int cpu = -1;
Tom Herbertfec5e652010-04-16 16:01:27 -07003119 u16 tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003120
Tom Herbert0a9627f2010-03-16 08:03:29 +00003121 if (skb_rx_queue_recorded(skb)) {
3122 u16 index = skb_get_rx_queue(skb);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003123 if (unlikely(index >= dev->real_num_rx_queues)) {
3124 WARN_ONCE(dev->real_num_rx_queues > 1,
3125 "%s received packet on queue %u, but number "
3126 "of RX queues is %u\n",
3127 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003128 goto done;
3129 }
3130 rxqueue = dev->_rx + index;
3131 } else
3132 rxqueue = dev->_rx;
3133
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003134 map = rcu_dereference(rxqueue->rps_map);
3135 if (map) {
Tom Herbert85875232011-01-31 16:23:42 -08003136 if (map->len == 1 &&
Eric Dumazet33d480c2011-08-11 19:30:52 +00003137 !rcu_access_pointer(rxqueue->rps_flow_table)) {
Changli Gao6febfca2010-09-03 23:12:37 +00003138 tcpu = map->cpus[0];
3139 if (cpu_online(tcpu))
3140 cpu = tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003141 goto done;
Eric Dumazetb249dcb2010-04-19 21:56:38 +00003142 }
Eric Dumazet33d480c2011-08-11 19:30:52 +00003143 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003144 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003145 }
3146
Changli Gao2d47b452010-08-17 19:00:56 +00003147 skb_reset_network_header(skb);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003148 if (!skb_get_rxhash(skb))
Tom Herbert0a9627f2010-03-16 08:03:29 +00003149 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003150
Tom Herbertfec5e652010-04-16 16:01:27 -07003151 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3152 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3153 if (flow_table && sock_flow_table) {
3154 u16 next_cpu;
3155 struct rps_dev_flow *rflow;
3156
3157 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
3158 tcpu = rflow->cpu;
3159
3160 next_cpu = sock_flow_table->ents[skb->rxhash &
3161 sock_flow_table->mask];
3162
3163 /*
3164 * If the desired CPU (where last recvmsg was done) is
3165 * different from current CPU (one in the rx-queue flow
3166 * table entry), switch if one of the following holds:
3167 * - Current CPU is unset (equal to RPS_NO_CPU).
3168 * - Current CPU is offline.
3169 * - The current CPU's queue tail has advanced beyond the
3170 * last packet that was enqueued using this table entry.
3171 * This guarantees that all previous packets for the flow
3172 * have been dequeued, thus preserving in order delivery.
3173 */
3174 if (unlikely(tcpu != next_cpu) &&
3175 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3176 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003177 rflow->last_qtail)) >= 0)) {
3178 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003179 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003180 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003181
Tom Herbertfec5e652010-04-16 16:01:27 -07003182 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3183 *rflowp = rflow;
3184 cpu = tcpu;
3185 goto done;
3186 }
3187 }
3188
Tom Herbert0a9627f2010-03-16 08:03:29 +00003189 if (map) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003190 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003191
3192 if (cpu_online(tcpu)) {
3193 cpu = tcpu;
3194 goto done;
3195 }
3196 }
3197
3198done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003199 return cpu;
3200}
3201
Ben Hutchingsc4454772011-01-19 11:03:53 +00003202#ifdef CONFIG_RFS_ACCEL
3203
3204/**
3205 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3206 * @dev: Device on which the filter was set
3207 * @rxq_index: RX queue index
3208 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3209 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3210 *
3211 * Drivers that implement ndo_rx_flow_steer() should periodically call
3212 * this function for each installed filter and remove the filters for
3213 * which it returns %true.
3214 */
3215bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3216 u32 flow_id, u16 filter_id)
3217{
3218 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3219 struct rps_dev_flow_table *flow_table;
3220 struct rps_dev_flow *rflow;
3221 bool expire = true;
3222 int cpu;
3223
3224 rcu_read_lock();
3225 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3226 if (flow_table && flow_id <= flow_table->mask) {
3227 rflow = &flow_table->flows[flow_id];
3228 cpu = ACCESS_ONCE(rflow->cpu);
3229 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3230 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3231 rflow->last_qtail) <
3232 (int)(10 * flow_table->mask)))
3233 expire = false;
3234 }
3235 rcu_read_unlock();
3236 return expire;
3237}
3238EXPORT_SYMBOL(rps_may_expire_flow);
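/* Driver-side sketch (not compiled) of the periodic scan described in
 * the kernel-doc above; every example_* name and struct is invented.
 */
#if 0
struct example_filter {
	bool	installed;
	u32	flow_id;
	u16	filter_id;
};

struct example_priv {
	struct net_device	*netdev;
	unsigned int		n_filters;
	struct example_filter	*filters;
};

static void example_expire_rfs_filters(struct example_priv *priv,
					u16 rxq_index)
{
	unsigned int i;

	for (i = 0; i < priv->n_filters; i++) {
		struct example_filter *f = &priv->filters[i];

		if (f->installed &&
		    rps_may_expire_flow(priv->netdev, rxq_index,
					f->flow_id, f->filter_id)) {
			/* remove the hardware filter and free the slot */
			f->installed = false;
		}
	}
}
#endif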
3239
3240#endif /* CONFIG_RFS_ACCEL */
3241
Tom Herbert0a9627f2010-03-16 08:03:29 +00003242/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003243static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003244{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003245 struct softnet_data *sd = data;
3246
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003247 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003248 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003249}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003250
Tom Herbertfec5e652010-04-16 16:01:27 -07003251#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003252
3253/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003254 * Check if this softnet_data structure belongs to another CPU
3255 * If yes, queue it to our IPI list and return 1
3256 * If no, return 0
3257 */
3258static int rps_ipi_queued(struct softnet_data *sd)
3259{
3260#ifdef CONFIG_RPS
3261 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3262
3263 if (sd != mysd) {
3264 sd->rps_ipi_next = mysd->rps_ipi_list;
3265 mysd->rps_ipi_list = sd;
3266
3267 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3268 return 1;
3269 }
3270#endif /* CONFIG_RPS */
3271 return 0;
3272}
3273
3274/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003275 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3276 * queue (may be a remote CPU queue).
3277 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003278static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3279 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003280{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003281 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003282 unsigned long flags;
3283
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003284 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003285
3286 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003287
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003288 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003289 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
3290 if (skb_queue_len(&sd->input_pkt_queue)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003291enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003292 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003293 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003294 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003295 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003296 return NET_RX_SUCCESS;
3297 }
3298
Eric Dumazetebda37c22010-05-06 23:51:21 +00003299		/* Schedule NAPI for the backlog device.
3300		 * We can use a non-atomic operation since we own the queue lock.
3301 */
3302 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003303 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003304 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003305 }
3306 goto enqueue;
3307 }
3308
Changli Gaodee42872010-05-02 05:42:16 +00003309 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003310 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003311
Tom Herbert0a9627f2010-03-16 08:03:29 +00003312 local_irq_restore(flags);
3313
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003314 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003315 kfree_skb(skb);
3316 return NET_RX_DROP;
3317}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003318
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319/**
3320 * netif_rx - post buffer to the network code
3321 * @skb: buffer to post
3322 *
3323 * This function receives a packet from a device driver and queues it for
3324 * the upper (protocol) levels to process. It always succeeds. The buffer
3325 * may be dropped during processing for congestion control or by the
3326 * protocol layers.
3327 *
3328 * return values:
3329 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 * NET_RX_DROP (packet was dropped)
3331 *
3332 */
3333
3334int netif_rx(struct sk_buff *skb)
3335{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003336 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337
3338 /* if netpoll wants it, pretend we never saw it */
3339 if (netpoll_rx(skb))
3340 return NET_RX_DROP;
3341
Eric Dumazet588f0332011-11-15 04:12:55 +00003342 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343
Koki Sanagicf66ba52010-08-23 18:45:02 +09003344 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003345#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003346 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003347 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003348 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349
Changli Gaocece1942010-08-07 20:35:43 -07003350 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003351 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003352
3353 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003354 if (cpu < 0)
3355 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003356
3357 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3358
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003359 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003360 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003361 } else
3362#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003363 {
3364 unsigned int qtail;
3365 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3366 put_cpu();
3367 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003368 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003370EXPORT_SYMBOL(netif_rx);
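/* Caller sketch (not compiled): a minimal non-NAPI receive path of the
 * kind the kernel-doc above describes.  example_rx() is an invented
 * name, and real drivers DMA into the skb instead of copying.
 */
#if 0
static void example_rx(struct net_device *dev, const void *data, int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb)
		return;				/* drop: no skb memory */
	memcpy(skb_put(skb, len), data, len);	/* copy in frame + header */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* queue to a CPU backlog */
}
#endif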
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371
3372int netif_rx_ni(struct sk_buff *skb)
3373{
3374 int err;
3375
3376 preempt_disable();
3377 err = netif_rx(skb);
3378 if (local_softirq_pending())
3379 do_softirq();
3380 preempt_enable();
3381
3382 return err;
3383}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384EXPORT_SYMBOL(netif_rx_ni);
3385
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386static void net_tx_action(struct softirq_action *h)
3387{
3388 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3389
3390 if (sd->completion_queue) {
3391 struct sk_buff *clist;
3392
3393 local_irq_disable();
3394 clist = sd->completion_queue;
3395 sd->completion_queue = NULL;
3396 local_irq_enable();
3397
3398 while (clist) {
3399 struct sk_buff *skb = clist;
3400 clist = clist->next;
3401
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003402 WARN_ON(atomic_read(&skb->users));
Koki Sanagi07dc22e2010-08-23 18:46:12 +09003403 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404 __kfree_skb(skb);
3405 }
3406 }
3407
3408 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003409 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410
3411 local_irq_disable();
3412 head = sd->output_queue;
3413 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003414 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 local_irq_enable();
3416
3417 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003418 struct Qdisc *q = head;
3419 spinlock_t *root_lock;
3420
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421 head = head->next_sched;
3422
David S. Miller5fb66222008-08-02 20:02:43 -07003423 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003424 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003425 smp_mb__before_clear_bit();
3426 clear_bit(__QDISC_STATE_SCHED,
3427 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003428 qdisc_run(q);
3429 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003430 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003431 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003432 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003433 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003434 } else {
3435 smp_mb__before_clear_bit();
3436 clear_bit(__QDISC_STATE_SCHED,
3437 &q->state);
3438 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439 }
3440 }
3441 }
3442}
3443
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003444#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3445 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003446/* This hook is defined here for ATM LANE */
3447int (*br_fdb_test_addr_hook)(struct net_device *dev,
3448 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003449EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003450#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003451
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452#ifdef CONFIG_NET_CLS_ACT
3453/* TODO: Maybe we should just force sch_ingress to be compiled in
3454 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
3455 * instructions (a compare and two extra stores) right now when we
3456 * don't have it on but do have CONFIG_NET_CLS_ACT.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003457 * NOTE: This doesn't stop any functionality; if you don't have
3458 * the ingress scheduler, you just can't add policies on ingress.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459 *
3460 */
Eric Dumazet24824a02010-10-02 06:11:55 +00003461static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003464 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07003465 int result = TC_ACT_OK;
3466 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003467
Stephen Hemmingerde384832010-08-01 00:33:23 -07003468 if (unlikely(MAX_RED_LOOP < ttl++)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003469		net_warn_ratelimited("Redir loop detected, dropping packet (%d->%d)\n",
3470 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07003471 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472 }
3473
Herbert Xuf697c3e2007-10-14 00:38:47 -07003474 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3475 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3476
David S. Miller83874002008-07-17 00:53:03 -07003477 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07003478 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07003479 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07003480 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3481 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07003482 spin_unlock(qdisc_lock(q));
3483 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07003484
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485 return result;
3486}
Herbert Xuf697c3e2007-10-14 00:38:47 -07003487
3488static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3489 struct packet_type **pt_prev,
3490 int *ret, struct net_device *orig_dev)
3491{
Eric Dumazet24824a02010-10-02 06:11:55 +00003492 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3493
3494 if (!rxq || rxq->qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003495 goto out;
3496
3497 if (*pt_prev) {
3498 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3499 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003500 }
3501
Eric Dumazet24824a02010-10-02 06:11:55 +00003502 switch (ing_filter(skb, rxq)) {
Herbert Xuf697c3e2007-10-14 00:38:47 -07003503 case TC_ACT_SHOT:
3504 case TC_ACT_STOLEN:
3505 kfree_skb(skb);
3506 return NULL;
3507 }
3508
3509out:
3510 skb->tc_verd = 0;
3511 return skb;
3512}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003513#endif
3514
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003515/**
3516 * netdev_rx_handler_register - register receive handler
3517 * @dev: device to register a handler for
3518 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003519 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003520 *
3521 * Register a receive handler for a device. This handler will then be
3522 * called from __netif_receive_skb. A negative errno code is returned
3523 * on a failure.
3524 *
3525 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003526 *
3527 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003528 */
3529int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003530 rx_handler_func_t *rx_handler,
3531 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003532{
3533 ASSERT_RTNL();
3534
3535 if (dev->rx_handler)
3536 return -EBUSY;
3537
Jiri Pirko93e2c322010-06-10 03:34:59 +00003538 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003539 rcu_assign_pointer(dev->rx_handler, rx_handler);
3540
3541 return 0;
3542}
3543EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
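/* Usage sketch (not compiled) mirroring the kernel-doc above; the
 * example_* names are invented.  Registration must run under rtnl_lock.
 */
#if 0
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (skb->pkt_type == PACKET_LOOPBACK)
		return RX_HANDLER_PASS;	/* the normal stack still sees it */

	/* From here on the handler owns the skb; a real handler would
	 * typically steer it to a port device instead of freeing it.
	 */
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

static int example_attach(struct net_device *dev, void *priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_rx_handler, priv);
	rtnl_unlock();
	return err;
}
#endif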
3544
3545/**
3546 * netdev_rx_handler_unregister - unregister receive handler
3547 * @dev: device to unregister a handler from
3548 *
3549 * Unregister a receive handler from a device.
3550 *
3551 * The caller must hold the rtnl_mutex.
3552 */
3553void netdev_rx_handler_unregister(struct net_device *dev)
3554{
3555
3556 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003557 RCU_INIT_POINTER(dev->rx_handler, NULL);
3558 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003559}
3560EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3561
Mel Gormanb4b9e352012-07-31 16:44:26 -07003562/*
3563 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3564 * the special handling of PFMEMALLOC skbs.
3565 */
3566static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3567{
3568 switch (skb->protocol) {
3569 case __constant_htons(ETH_P_ARP):
3570 case __constant_htons(ETH_P_IP):
3571 case __constant_htons(ETH_P_IPV6):
3572 case __constant_htons(ETH_P_8021Q):
3573 return true;
3574 default:
3575 return false;
3576 }
3577}
3578
Eric Dumazet10f744d2010-03-28 23:07:20 -07003579static int __netif_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580{
3581 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003582 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003583 struct net_device *orig_dev;
David S. Miller63d8ea72011-02-28 10:48:59 -08003584 struct net_device *null_or_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003585 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003586 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08003587 __be16 type;
Mel Gormanb4b9e352012-07-31 16:44:26 -07003588 unsigned long pflags = current->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589
Eric Dumazet588f0332011-11-15 04:12:55 +00003590 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003591
Koki Sanagicf66ba52010-08-23 18:45:02 +09003592 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003593
Mel Gormanb4b9e352012-07-31 16:44:26 -07003594 /*
3595 * PFMEMALLOC skbs are special, they should
3596 * - be delivered to SOCK_MEMALLOC sockets only
3597 * - stay away from userspace
3598 * - have bounded memory usage
3599 *
3600 * Use PF_MEMALLOC as this saves us from propagating the allocation
3601 * context down to all allocation sites.
3602 */
3603 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3604 current->flags |= PF_MEMALLOC;
3605
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003607 if (netpoll_receive_skb(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003608 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003610 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003611
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003612 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003613 if (!skb_transport_header_was_set(skb))
3614 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003615 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616
3617 pt_prev = NULL;
3618
3619 rcu_read_lock();
3620
David S. Miller63d8ea72011-02-28 10:48:59 -08003621another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003622 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003623
3624 __this_cpu_inc(softnet_data.processed);
3625
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003626 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3627 skb = vlan_untag(skb);
3628 if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003629 goto unlock;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003630 }
3631
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632#ifdef CONFIG_NET_CLS_ACT
3633 if (skb->tc_verd & TC_NCLS) {
3634 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3635 goto ncls;
3636 }
3637#endif
3638
Mel Gormanb4b9e352012-07-31 16:44:26 -07003639 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3640 goto skip_taps;
3641
Linus Torvalds1da177e2005-04-16 15:20:36 -07003642 list_for_each_entry_rcu(ptype, &ptype_all, list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003643 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003644 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003645 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003646 pt_prev = ptype;
3647 }
3648 }
3649
Mel Gormanb4b9e352012-07-31 16:44:26 -07003650skip_taps:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07003652 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3653 if (!skb)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003654 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003655ncls:
3656#endif
3657
Mel Gormanb4b9e352012-07-31 16:44:26 -07003658 if (sk_memalloc_socks() && skb_pfmemalloc(skb)
3659 && !skb_pfmemalloc_protocol(skb))
3660 goto drop;
3661
John Fastabend24257172011-10-10 09:16:41 +00003662 if (vlan_tx_tag_present(skb)) {
3663 if (pt_prev) {
3664 ret = deliver_skb(skb, pt_prev, orig_dev);
3665 pt_prev = NULL;
3666 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003667 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003668 goto another_round;
3669 else if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003670 goto unlock;
John Fastabend24257172011-10-10 09:16:41 +00003671 }
3672
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003673 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003674 if (rx_handler) {
3675 if (pt_prev) {
3676 ret = deliver_skb(skb, pt_prev, orig_dev);
3677 pt_prev = NULL;
3678 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003679 switch (rx_handler(&skb)) {
3680 case RX_HANDLER_CONSUMED:
Mel Gormanb4b9e352012-07-31 16:44:26 -07003681 goto unlock;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003682 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003683 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003684 case RX_HANDLER_EXACT:
3685 deliver_exact = true;
3686 case RX_HANDLER_PASS:
3687 break;
3688 default:
3689 BUG();
3690 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003691 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003692
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003693 if (vlan_tx_nonzero_tag_present(skb))
3694 skb->pkt_type = PACKET_OTHERHOST;
3695
David S. Miller63d8ea72011-02-28 10:48:59 -08003696 /* deliver only exact match when indicated */
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003697 null_or_dev = deliver_exact ? skb->dev : NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00003698
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003700 list_for_each_entry_rcu(ptype,
3701 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003702 if (ptype->type == type &&
Jiri Pirkoe3f48d32011-02-28 20:26:31 +00003703 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3704 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003705 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003706 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707 pt_prev = ptype;
3708 }
3709 }
3710
3711 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003712 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003713 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003714 else
3715 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003716 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003717drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003718 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 kfree_skb(skb);
3720 /* Jamal, now you will not able to escape explaining
3721		/* Jamal, now you will not be able to escape explaining
3722		 * to me how you were going to use this. :-)
3723 ret = NET_RX_DROP;
3724 }
3725
Mel Gormanb4b9e352012-07-31 16:44:26 -07003726unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727 rcu_read_unlock();
Mel Gormanb4b9e352012-07-31 16:44:26 -07003728out:
3729 tsk_restore_flags(current, pflags, PF_MEMALLOC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730 return ret;
3731}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003732
3733/**
3734 * netif_receive_skb - process receive buffer from network
3735 * @skb: buffer to process
3736 *
3737 * netif_receive_skb() is the main receive data processing function.
3738 * It always succeeds. The buffer may be dropped during processing
3739 * for congestion control or by the protocol layers.
3740 *
3741 * This function may only be called from softirq context and interrupts
3742 * should be enabled.
3743 *
3744 * Return values (usually ignored):
3745 * NET_RX_SUCCESS: no congestion
3746 * NET_RX_DROP: packet was dropped
3747 */
3748int netif_receive_skb(struct sk_buff *skb)
3749{
Eric Dumazet588f0332011-11-15 04:12:55 +00003750 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003751
Richard Cochranc1f19b52010-07-17 08:49:36 +00003752 if (skb_defer_rx_timestamp(skb))
3753 return NET_RX_SUCCESS;
3754
Eric Dumazetdf334542010-03-24 19:13:54 +00003755#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003756 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07003757 struct rps_dev_flow voidflow, *rflow = &voidflow;
3758 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003759
Eric Dumazet3b098e22010-05-15 23:57:10 -07003760 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003761
Eric Dumazet3b098e22010-05-15 23:57:10 -07003762 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07003763
Eric Dumazet3b098e22010-05-15 23:57:10 -07003764 if (cpu >= 0) {
3765 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3766 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00003767 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003768 }
Eric Dumazetadc93002011-11-17 03:13:26 +00003769 rcu_read_unlock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003770 }
Tom Herbert1e94d722010-03-18 17:45:44 -07003771#endif
Eric Dumazetadc93002011-11-17 03:13:26 +00003772 return __netif_receive_skb(skb);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003773}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003774EXPORT_SYMBOL(netif_receive_skb);
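/* Caller sketch (not compiled): the softirq-context NAPI poll loop the
 * kernel-doc above expects.  example_next_rx_skb() is an invented
 * stand-in for a driver's RX ring handling.
 */
#if 0
static struct sk_buff *example_next_rx_skb(struct napi_struct *napi);

static int example_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = example_next_rx_skb(napi)) != NULL) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		netif_receive_skb(skb);	/* softirq context, irqs enabled */
		work++;
	}
	if (work < budget)
		napi_complete(napi);	/* done: re-enable device irqs */
	return work;
}
#endif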
Linus Torvalds1da177e2005-04-16 15:20:36 -07003775
Eric Dumazet88751272010-04-19 05:07:33 +00003776/* Network device is going away, flush any packets still pending
3777 * Called with irqs disabled.
3778 */
Changli Gao152102c2010-03-30 20:16:22 +00003779static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003780{
Changli Gao152102c2010-03-30 20:16:22 +00003781 struct net_device *dev = arg;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003782 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003783 struct sk_buff *skb, *tmp;
3784
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003785 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003786 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003787 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003788 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003789 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003790 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003791 }
Changli Gao6e7676c2010-04-27 15:07:33 -07003792 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003793 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003794
3795 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3796 if (skb->dev == dev) {
3797 __skb_unlink(skb, &sd->process_queue);
3798 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003799 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003800 }
3801 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003802}
3803
Herbert Xud565b0a2008-12-15 23:38:52 -08003804static int napi_gro_complete(struct sk_buff *skb)
3805{
Vlad Yasevich22061d82012-11-15 08:49:11 +00003806 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003807 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003808 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08003809 int err = -ENOENT;
3810
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003811 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3812
Herbert Xufc59f9a2009-04-14 15:11:06 -07003813 if (NAPI_GRO_CB(skb)->count == 1) {
3814 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003815 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07003816 }
Herbert Xud565b0a2008-12-15 23:38:52 -08003817
3818 rcu_read_lock();
3819 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003820 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08003821 continue;
3822
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003823 err = ptype->callbacks.gro_complete(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003824 break;
3825 }
3826 rcu_read_unlock();
3827
3828 if (err) {
3829 WARN_ON(&ptype->list == head);
3830 kfree_skb(skb);
3831 return NET_RX_SUCCESS;
3832 }
3833
3834out:
Herbert Xud565b0a2008-12-15 23:38:52 -08003835 return netif_receive_skb(skb);
3836}
3837
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003838/* napi->gro_list contains packets ordered by age.
3839 * The youngest packets are at its head.
3840 * Complete skbs in reverse order to reduce latencies.
3841 */
3842void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08003843{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003844 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003845
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003846 /* scan list and build reverse chain */
3847 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3848 skb->prev = prev;
3849 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08003850 }
3851
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003852 for (skb = prev; skb; skb = prev) {
3853 skb->next = NULL;
3854
3855 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3856 return;
3857
3858 prev = skb->prev;
3859 napi_gro_complete(skb);
3860 napi->gro_count--;
3861 }
3862
Herbert Xud565b0a2008-12-15 23:38:52 -08003863 napi->gro_list = NULL;
3864}
Eric Dumazet86cac582010-08-31 18:25:32 +00003865EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08003866
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003867static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3868{
3869 struct sk_buff *p;
3870 unsigned int maclen = skb->dev->hard_header_len;
3871
3872 for (p = napi->gro_list; p; p = p->next) {
3873 unsigned long diffs;
3874
3875 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3876 diffs |= p->vlan_tci ^ skb->vlan_tci;
3877 if (maclen == ETH_HLEN)
3878 diffs |= compare_ether_header(skb_mac_header(p),
3879 skb_gro_mac_header(skb));
3880 else if (!diffs)
3881 diffs = memcmp(skb_mac_header(p),
3882 skb_gro_mac_header(skb),
3883 maclen);
3884 NAPI_GRO_CB(p)->same_flow = !diffs;
3885 NAPI_GRO_CB(p)->flush = 0;
3886 }
3887}
3888
Rami Rosenbb728822012-11-28 21:55:25 +00003889static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08003890{
3891 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003892 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003893 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003894 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003895 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08003896 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003897 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003898
Jarek Poplawskice9e76c2010-08-05 01:19:11 +00003899 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
Herbert Xud565b0a2008-12-15 23:38:52 -08003900 goto normal;
3901
David S. Miller21dc3302010-08-23 00:13:46 -07003902 if (skb_is_gso(skb) || skb_has_frag_list(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08003903 goto normal;
3904
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003905 gro_list_prepare(napi, skb);
3906
Herbert Xud565b0a2008-12-15 23:38:52 -08003907 rcu_read_lock();
3908 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003909 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08003910 continue;
3911
Herbert Xu86911732009-01-29 14:19:50 +00003912 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08003913 mac_len = skb->network_header - skb->mac_header;
3914 skb->mac_len = mac_len;
3915 NAPI_GRO_CB(skb)->same_flow = 0;
3916 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08003917 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003918
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003919 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003920 break;
3921 }
3922 rcu_read_unlock();
3923
3924 if (&ptype->list == head)
3925 goto normal;
3926
Herbert Xu0da2afd52008-12-26 14:57:42 -08003927 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003928 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003929
Herbert Xud565b0a2008-12-15 23:38:52 -08003930 if (pp) {
3931 struct sk_buff *nskb = *pp;
3932
3933 *pp = nskb->next;
3934 nskb->next = NULL;
3935 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00003936 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08003937 }
3938
Herbert Xu0da2afd52008-12-26 14:57:42 -08003939 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08003940 goto ok;
3941
Herbert Xu4ae55442009-02-08 18:00:36 +00003942 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08003943 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08003944
Herbert Xu4ae55442009-02-08 18:00:36 +00003945 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08003946 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003947 NAPI_GRO_CB(skb)->age = jiffies;
Herbert Xu86911732009-01-29 14:19:50 +00003948 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003949 skb->next = napi->gro_list;
3950 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003951 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08003952
Herbert Xuad0f9902009-02-01 01:24:55 -08003953pull:
Herbert Xucb189782009-05-26 18:50:31 +00003954 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3955 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3956
3957 BUG_ON(skb->end - skb->tail < grow);
3958
3959 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3960
3961 skb->tail += grow;
3962 skb->data_len -= grow;
3963
3964 skb_shinfo(skb)->frags[0].page_offset += grow;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003965 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
Herbert Xucb189782009-05-26 18:50:31 +00003966
Eric Dumazet9e903e02011-10-18 21:00:24 +00003967 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
Ian Campbellea2ab692011-08-22 23:44:58 +00003968 skb_frag_unref(skb, 0);
Herbert Xucb189782009-05-26 18:50:31 +00003969 memmove(skb_shinfo(skb)->frags,
3970 skb_shinfo(skb)->frags + 1,
Jarek Poplawskie5093ae2010-08-11 02:02:10 +00003971 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
Herbert Xucb189782009-05-26 18:50:31 +00003972 }
Herbert Xuad0f9902009-02-01 01:24:55 -08003973 }
3974
Herbert Xud565b0a2008-12-15 23:38:52 -08003975ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003976 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003977
3978normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08003979 ret = GRO_NORMAL;
3980 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08003981}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003982
Herbert Xu96e93ea2009-01-06 10:49:34 -08003983
Rami Rosenbb728822012-11-28 21:55:25 +00003984static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003985{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003986 switch (ret) {
3987 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003988 if (netif_receive_skb(skb))
3989 ret = GRO_DROP;
3990 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003991
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003992 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08003993 kfree_skb(skb);
3994 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003995
Eric Dumazetdaa86542012-04-19 07:07:40 +00003996 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00003997 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3998 kmem_cache_free(skbuff_head_cache, skb);
3999 else
4000 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00004001 break;
4002
Ben Hutchings5b252f02009-10-29 07:17:09 +00004003 case GRO_HELD:
4004 case GRO_MERGED:
4005 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004006 }
4007
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004008 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004009}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004010
Eric Dumazetca07e432012-10-06 22:28:06 +00004011static void skb_gro_reset_offset(struct sk_buff *skb)
Herbert Xu78a478d2009-05-26 18:50:21 +00004012{
Eric Dumazetca07e432012-10-06 22:28:06 +00004013 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4014 const skb_frag_t *frag0 = &pinfo->frags[0];
4015
Herbert Xu78a478d2009-05-26 18:50:21 +00004016 NAPI_GRO_CB(skb)->data_offset = 0;
4017 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00004018 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00004019
Herbert Xu78d3fd02009-05-26 18:50:23 +00004020 if (skb->mac_header == skb->tail &&
Eric Dumazetca07e432012-10-06 22:28:06 +00004021 pinfo->nr_frags &&
4022 !PageHighMem(skb_frag_page(frag0))) {
4023 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4024 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xu74895942009-05-26 18:50:27 +00004025 }
Herbert Xu78a478d2009-05-26 18:50:21 +00004026}
Herbert Xu78a478d2009-05-26 18:50:21 +00004027
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004028gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004029{
Herbert Xu86911732009-01-29 14:19:50 +00004030 skb_gro_reset_offset(skb);
4031
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004032 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004033}
4034EXPORT_SYMBOL(napi_gro_receive);
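/* Editor's note: a hedged sketch of how a GRO-capable driver's ->poll()
 * callback feeds this function; all mydrv_* names are invented. Passing
 * skbs here instead of straight to netif_receive_skb() lets
 * dev_gro_receive() merge consecutive segments of the same flow:
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = mydrv_next_rx_skb(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		return work;
 *	}
 */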
4035
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00004036static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004037{
Herbert Xu96e93ea2009-01-06 10:49:34 -08004038 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00004039 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4040 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00004041 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08004042 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08004043 skb->skb_iif = 0;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004044
4045 napi->skb = skb;
4046}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004047
Herbert Xu76620aa2009-04-16 02:02:07 -07004048struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08004049{
Herbert Xu5d38a072009-01-04 16:13:40 -08004050 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004051
4052 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00004053 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
4054 if (skb)
4055 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004056 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004057 return skb;
4058}
Herbert Xu76620aa2009-04-16 02:02:07 -07004059EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004060
Rami Rosenbb728822012-11-28 21:55:25 +00004061static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004062 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004063{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004064 switch (ret) {
4065 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00004066 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00004067 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00004068
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004069 if (ret == GRO_HELD)
4070 skb_gro_pull(skb, -ETH_HLEN);
4071 else if (netif_receive_skb(skb))
4072 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00004073 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004074
4075 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004076 case GRO_MERGED_FREE:
4077 napi_reuse_skb(napi, skb);
4078 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004079
4080 case GRO_MERGED:
4081 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004082 }
4083
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004084 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004085}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004086
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004087static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004088{
Herbert Xu76620aa2009-04-16 02:02:07 -07004089 struct sk_buff *skb = napi->skb;
4090 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00004091 unsigned int hlen;
4092 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07004093
4094 napi->skb = NULL;
4095
4096 skb_reset_mac_header(skb);
4097 skb_gro_reset_offset(skb);
4098
Herbert Xua5b1cf22009-05-26 18:50:28 +00004099 off = skb_gro_offset(skb);
4100 hlen = off + sizeof(*eth);
4101 eth = skb_gro_header_fast(skb, off);
4102 if (skb_gro_header_hard(skb, hlen)) {
4103 eth = skb_gro_header_slow(skb, hlen, off);
4104 if (unlikely(!eth)) {
4105 napi_reuse_skb(napi, skb);
4106 skb = NULL;
4107 goto out;
4108 }
Herbert Xu76620aa2009-04-16 02:02:07 -07004109 }
4110
4111 skb_gro_pull(skb, sizeof(*eth));
4112
4113 /*
4114 * This works because the only protocols we care about don't require
4115 * special handling. We'll fix it up properly at the end.
4116 */
4117 skb->protocol = eth->h_proto;
4118
4119out:
4120 return skb;
4121}
Herbert Xu76620aa2009-04-16 02:02:07 -07004122
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004123gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004124{
4125 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004126
4127 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004128 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004129
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004130 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004131}
4132EXPORT_SYMBOL(napi_gro_frags);
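/* Editor's note: illustrative sketch of the page-fragment GRO path; the
 * rx_* variables are hypothetical. The driver borrows napi->skb via
 * napi_get_frags(), attaches its RX page, and lets napi_gro_frags()
 * parse the Ethernet header itself via napi_frags_skb():
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;	(allocation failed; drop the hardware buffer)
 *	skb_fill_page_desc(skb, 0, rx_page, rx_offset, rx_len);
 *	skb->len += rx_len;
 *	skb->data_len += rx_len;
 *	skb->truesize += rx_truesize;
 *	napi_gro_frags(napi);
 */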
4133
Eric Dumazete326bed2010-04-22 00:22:45 -07004134/*
4135 * net_rps_action sends any pending IPIs for RPS.
4136 * Note: called with local irq disabled, but exits with local irq enabled.
4137 */
4138static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4139{
4140#ifdef CONFIG_RPS
4141 struct softnet_data *remsd = sd->rps_ipi_list;
4142
4143 if (remsd) {
4144 sd->rps_ipi_list = NULL;
4145
4146 local_irq_enable();
4147
4148		/* Send pending IPIs to kick RPS processing on remote CPUs. */
4149 while (remsd) {
4150 struct softnet_data *next = remsd->rps_ipi_next;
4151
4152 if (cpu_online(remsd->cpu))
4153 __smp_call_function_single(remsd->cpu,
4154 &remsd->csd, 0);
4155 remsd = next;
4156 }
4157 } else
4158#endif
4159 local_irq_enable();
4160}
4161
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004162static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163{
4164 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004165 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166
Eric Dumazete326bed2010-04-22 00:22:45 -07004167#ifdef CONFIG_RPS
4168	/* Check if we have pending IPIs; it's better to send them now
4169	 * than to wait for net_rx_action() to end.
4170 */
4171 if (sd->rps_ipi_list) {
4172 local_irq_disable();
4173 net_rps_action_and_irq_enable(sd);
4174 }
4175#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004176 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004177 local_irq_disable();
4178 while (work < quota) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179 struct sk_buff *skb;
Changli Gao6e7676c2010-04-27 15:07:33 -07004180 unsigned int qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181
Changli Gao6e7676c2010-04-27 15:07:33 -07004182 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004183 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004184 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004185 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004186 input_queue_head_incr(sd);
4187 if (++work >= quota) {
4188 local_irq_enable();
4189 return work;
4190 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004191 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192
Changli Gao6e7676c2010-04-27 15:07:33 -07004193 rps_lock(sd);
4194 qlen = skb_queue_len(&sd->input_pkt_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004195 if (qlen)
Changli Gao6e7676c2010-04-27 15:07:33 -07004196 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4197 &sd->process_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004198
Changli Gao6e7676c2010-04-27 15:07:33 -07004199 if (qlen < quota - work) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004200 /*
4201 * Inline a custom version of __napi_complete().
4202			 * Only the current cpu owns and manipulates this napi,
4203			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
4204			 * We can use a plain write instead of clear_bit(),
4205			 * and we don't need an smp_mb() memory barrier.
4206 */
4207 list_del(&napi->poll_list);
4208 napi->state = 0;
4209
Changli Gao6e7676c2010-04-27 15:07:33 -07004210 quota = work + qlen;
4211 }
4212 rps_unlock(sd);
4213 }
4214 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004216 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217}
4218
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004219/**
4220 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004221 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004222 *
4223 * The entry's receive function will be scheduled to run
4224 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004225void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004226{
4227 unsigned long flags;
4228
4229 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004230 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004231 local_irq_restore(flags);
4232}
4233EXPORT_SYMBOL(__napi_schedule);
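/* Editor's note: a sketch of the canonical interrupt-handler pairing for
 * this function; the mydrv_* names are invented. Drivers normally go
 * through napi_schedule(), which tests NAPI_STATE_SCHED with
 * napi_schedule_prep() before invoking __napi_schedule():
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
 *	{
 *		struct mydrv_priv *priv = dev_id;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			mydrv_disable_rx_irq(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */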
4234
Herbert Xud565b0a2008-12-15 23:38:52 -08004235void __napi_complete(struct napi_struct *n)
4236{
4237 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4238 BUG_ON(n->gro_list);
4239
4240 list_del(&n->poll_list);
4241 smp_mb__before_clear_bit();
4242 clear_bit(NAPI_STATE_SCHED, &n->state);
4243}
4244EXPORT_SYMBOL(__napi_complete);
4245
4246void napi_complete(struct napi_struct *n)
4247{
4248 unsigned long flags;
4249
4250 /*
4251 * don't let napi dequeue from the cpu poll list
4252 * just in case its running on a different cpu
4253 */
4254 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4255 return;
4256
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004257 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004258 local_irq_save(flags);
4259 __napi_complete(n);
4260 local_irq_restore(flags);
4261}
4262EXPORT_SYMBOL(napi_complete);
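/* Editor's note: hedged sketch of the end-of-poll pattern (hypothetical
 * driver). A ->poll() routine that used less than its budget calls
 * napi_complete() and re-arms its interrupt; one that consumed the full
 * budget stays scheduled and must not call napi_complete():
 *
 *	if (work < budget) {
 *		napi_complete(napi);
 *		mydrv_enable_rx_irq(priv);
 *	}
 *	return work;
 */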
4263
4264void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4265 int (*poll)(struct napi_struct *, int), int weight)
4266{
4267 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004268 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004269 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004270 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004271 napi->poll = poll;
4272 napi->weight = weight;
4273 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004274 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004275#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004276 spin_lock_init(&napi->poll_lock);
4277 napi->poll_owner = -1;
4278#endif
4279 set_bit(NAPI_STATE_SCHED, &napi->state);
4280}
4281EXPORT_SYMBOL(netif_napi_add);
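/* Editor's note: illustrative probe-time usage with invented mydrv_*
 * names; 64 is the weight most drivers pass. Note the instance starts
 * with NAPI_STATE_SCHED set, so it cannot be scheduled until
 * napi_enable() clears that bit (typically from ndo_open):
 *
 *	netif_napi_add(netdev, &priv->napi, mydrv_poll, 64);
 *	...
 *	napi_enable(&priv->napi);
 */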
4282
4283void netif_napi_del(struct napi_struct *napi)
4284{
4285 struct sk_buff *skb, *next;
4286
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004287 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004288 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004289
4290 for (skb = napi->gro_list; skb; skb = next) {
4291 next = skb->next;
4292 skb->next = NULL;
4293 kfree_skb(skb);
4294 }
4295
4296 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004297 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004298}
4299EXPORT_SYMBOL(netif_napi_del);
4300
Linus Torvalds1da177e2005-04-16 15:20:36 -07004301static void net_rx_action(struct softirq_action *h)
4302{
Eric Dumazete326bed2010-04-22 00:22:45 -07004303 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004304 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004305 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004306 void *have;
4307
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308 local_irq_disable();
4309
Eric Dumazete326bed2010-04-22 00:22:45 -07004310 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004311 struct napi_struct *n;
4312 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004314		/* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:38 -08004315		 * Allow this to run for 2 jiffies, which allows
4316		 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004317 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004318 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319 goto softnet_break;
4320
4321 local_irq_enable();
4322
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004323 /* Even though interrupts have been re-enabled, this
4324 * access is safe because interrupts can only add new
4325 * entries to the tail of this list, and only ->poll()
4326 * calls can remove this head entry from the list.
4327 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004328 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004330 have = netpoll_poll_lock(n);
4331
4332 weight = n->weight;
4333
David S. Miller0a7606c2007-10-29 21:28:47 -07004334 /* This NAPI_STATE_SCHED test is for avoiding a race
4335 * with netpoll's poll_napi(). Only the entity which
4336 * obtains the lock and sees NAPI_STATE_SCHED set will
4337 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004338 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004339 */
4340 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004341 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004342 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004343 trace_napi_poll(n);
4344 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004345
4346 WARN_ON_ONCE(work > weight);
4347
4348 budget -= work;
4349
4350 local_irq_disable();
4351
4352 /* Drivers must not modify the NAPI state if they
4353 * consume the entire weight. In such cases this code
4354 * still "owns" the NAPI instance and therefore can
4355 * move the instance around on the list at-will.
4356 */
David S. Millerfed17f32008-01-07 21:00:40 -08004357 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004358 if (unlikely(napi_disable_pending(n))) {
4359 local_irq_enable();
4360 napi_complete(n);
4361 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004362 } else {
4363 if (n->gro_list) {
4364				/* Flush packets that are too old.
4365				 * If HZ < 1000, flush all packets.
4366 */
4367 local_irq_enable();
4368 napi_gro_flush(n, HZ >= 1000);
4369 local_irq_disable();
4370 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004371 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004372 }
David S. Millerfed17f32008-01-07 21:00:40 -08004373 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004374
4375 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004376 }
4377out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004378 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004379
Chris Leechdb217332006-06-17 21:24:58 -07004380#ifdef CONFIG_NET_DMA
4381 /*
4382 * There may not be any more sk_buffs coming right now, so push
4383 * any pending DMA copies to hardware
4384 */
Dan Williams2ba05622009-01-06 11:38:14 -07004385 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004386#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004387
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388 return;
4389
4390softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004391 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004392 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4393 goto out;
4394}
4395
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004396static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004397
4398/**
4399 * register_gifconf - register a SIOCGIFCONF handler
4400 * @family: Address family
4401 * @gifconf: Function handler
4402 *
4403 * Register protocol dependent address dumping routines. The handler
4404 * that is passed must not be freed or reused until it has been replaced
4405 * by another handler.
4406 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004407int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408{
4409 if (family >= NPROTO)
4410 return -EINVAL;
4411 gifconf_list[family] = gifconf;
4412 return 0;
4413}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004414EXPORT_SYMBOL(register_gifconf);
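/* Editor's note: a hedged sketch of a registration; IPv4 really does this
 * with inet_gifconf(), but the handler body and PF_MYFAM here are
 * invented. The handler writes one struct ifreq per address of @dev and
 * returns the bytes written, or the space needed when the buffer is NULL:
 *
 *	static int myfam_gifconf(struct net_device *dev,
 *				 char __user *buf, int len)
 *	{
 *		(fill buf with struct ifreq entries, return byte count)
 *	}
 *	...
 *	register_gifconf(PF_MYFAM, myfam_gifconf);
 */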
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415
4416
4417/*
4418 * Map an interface index to its name (SIOCGIFNAME)
4419 */
4420
4421/*
4422 * We need this ioctl for efficient implementation of the
4423 * if_indextoname() function required by the IPv6 API. Without
4424 * it, we would have to search all the interfaces to find a
4425 * match. --pb
4426 */
4427
Eric W. Biederman881d9662007-09-17 11:56:21 -07004428static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004429{
4430 struct net_device *dev;
4431 struct ifreq ifr;
Brian Haleyc91f6df2012-11-26 05:21:08 +00004432 unsigned seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004433
4434 /*
4435 * Fetch the caller's info block.
4436 */
4437
4438 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4439 return -EFAULT;
4440
Brian Haleyc91f6df2012-11-26 05:21:08 +00004441retry:
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00004442 seq = read_seqcount_begin(&devnet_rename_seq);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004443 rcu_read_lock();
4444 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004446 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004447 return -ENODEV;
4448 }
4449
4450 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004451 rcu_read_unlock();
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00004452 if (read_seqcount_retry(&devnet_rename_seq, seq))
Brian Haleyc91f6df2012-11-26 05:21:08 +00004453 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454
4455 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4456 return -EFAULT;
4457 return 0;
4458}
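/* Editor's note: the userspace view of this ioctl, as a hedged sketch
 * (<sys/ioctl.h>, <net/if.h>; error handling elided). Any socket fd
 * will do:
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_ifindex = 2;
 *	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("ifindex 2 is %s\n", ifr.ifr_name);
 */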
4459
4460/*
4461 * Perform a SIOCGIFCONF call. This structure will change
4462 * size eventually, and there is nothing I can do about it.
4463 * Thus we will need a 'compatibility mode'.
4464 */
4465
Eric W. Biederman881d9662007-09-17 11:56:21 -07004466static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467{
4468 struct ifconf ifc;
4469 struct net_device *dev;
4470 char __user *pos;
4471 int len;
4472 int total;
4473 int i;
4474
4475 /*
4476 * Fetch the caller's info block.
4477 */
4478
4479 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4480 return -EFAULT;
4481
4482 pos = ifc.ifc_buf;
4483 len = ifc.ifc_len;
4484
4485 /*
4486 * Loop over the interfaces, and write an info block for each.
4487 */
4488
4489 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004490 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491 for (i = 0; i < NPROTO; i++) {
4492 if (gifconf_list[i]) {
4493 int done;
4494 if (!pos)
4495 done = gifconf_list[i](dev, NULL, 0);
4496 else
4497 done = gifconf_list[i](dev, pos + total,
4498 len - total);
4499 if (done < 0)
4500 return -EFAULT;
4501 total += done;
4502 }
4503 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004504 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505
4506 /*
4507 * All done. Write the updated control block back to the caller.
4508 */
4509 ifc.ifc_len = total;
4510
4511 /*
4512 * Both BSD and Solaris return 0 here, so we do too.
4513 */
4514 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4515}
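/* Editor's note: the matching userspace call, sketched with a fixed-size
 * buffer and no error handling. Passing a NULL ifc_buf makes the kernel
 * report only the space needed in ifc_len:
 *
 *	struct ifreq reqs[16];
 *	struct ifconf ifc = { .ifc_len = sizeof(reqs),
 *			      .ifc_req = reqs };
 *
 *	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0)
 *		(ifc.ifc_len / sizeof(struct ifreq) entries are valid)
 */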
4516
4517#ifdef CONFIG_PROC_FS
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004518
Eric Dumazet2def16a2012-04-02 22:33:02 +00004519#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004520
4521#define get_bucket(x) ((x) >> BUCKET_SPACE)
4522#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4523#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4524
Eric Dumazet2def16a2012-04-02 22:33:02 +00004525static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004526{
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004527 struct net *net = seq_file_net(seq);
4528 struct net_device *dev;
4529 struct hlist_node *p;
4530 struct hlist_head *h;
Eric Dumazet2def16a2012-04-02 22:33:02 +00004531 unsigned int count = 0, offset = get_offset(*pos);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004532
Eric Dumazet2def16a2012-04-02 22:33:02 +00004533 h = &net->dev_name_head[get_bucket(*pos)];
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004534 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
Eric Dumazet2def16a2012-04-02 22:33:02 +00004535 if (++count == offset)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004536 return dev;
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004537 }
4538
4539 return NULL;
4540}
4541
Eric Dumazet2def16a2012-04-02 22:33:02 +00004542static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004543{
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004544 struct net_device *dev;
4545 unsigned int bucket;
4546
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004547 do {
Eric Dumazet2def16a2012-04-02 22:33:02 +00004548 dev = dev_from_same_bucket(seq, pos);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004549 if (dev)
4550 return dev;
4551
Eric Dumazet2def16a2012-04-02 22:33:02 +00004552 bucket = get_bucket(*pos) + 1;
4553 *pos = set_bucket_offset(bucket, 1);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004554 } while (bucket < NETDEV_HASHENTRIES);
4555
4556 return NULL;
4557}
4558
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559/*
4560 * This is invoked by the /proc filesystem handler to display a device
4561 * in detail.
4562 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004563void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004564 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004565{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004566 rcu_read_lock();
Pavel Emelianov7562f872007-05-03 15:13:45 -07004567 if (!*pos)
4568 return SEQ_START_TOKEN;
4569
Eric Dumazet2def16a2012-04-02 22:33:02 +00004570 if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004571 return NULL;
Pavel Emelianov7562f872007-05-03 15:13:45 -07004572
Eric Dumazet2def16a2012-04-02 22:33:02 +00004573 return dev_from_bucket(seq, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004574}
4575
4576void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4577{
4578 ++*pos;
Eric Dumazet2def16a2012-04-02 22:33:02 +00004579 return dev_from_bucket(seq, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580}
4581
4582void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004583 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004585 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004586}
4587
4588static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4589{
Eric Dumazet28172732010-07-07 14:58:56 -07004590 struct rtnl_link_stats64 temp;
4591 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004592
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +00004593 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4594 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
Rusty Russell5a1b5892007-04-28 21:04:03 -07004595 dev->name, stats->rx_bytes, stats->rx_packets,
4596 stats->rx_errors,
4597 stats->rx_dropped + stats->rx_missed_errors,
4598 stats->rx_fifo_errors,
4599 stats->rx_length_errors + stats->rx_over_errors +
4600 stats->rx_crc_errors + stats->rx_frame_errors,
4601 stats->rx_compressed, stats->multicast,
4602 stats->tx_bytes, stats->tx_packets,
4603 stats->tx_errors, stats->tx_dropped,
4604 stats->tx_fifo_errors, stats->collisions,
4605 stats->tx_carrier_errors +
4606 stats->tx_aborted_errors +
4607 stats->tx_window_errors +
4608 stats->tx_heartbeat_errors,
4609 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610}
4611
4612/*
4613 * Called from the PROCfs module. This now uses the new arbitrary-sized
4614 * /proc/net interface to create /proc/net/dev.
4615 */
4616static int dev_seq_show(struct seq_file *seq, void *v)
4617{
4618 if (v == SEQ_START_TOKEN)
4619 seq_puts(seq, "Inter-| Receive "
4620 " | Transmit\n"
4621 " face |bytes packets errs drop fifo frame "
4622 "compressed multicast|bytes packets errs "
4623 "drop fifo colls carrier compressed\n");
4624 else
4625 dev_seq_printf_stats(seq, v);
4626 return 0;
4627}
4628
Changli Gaodee42872010-05-02 05:42:16 +00004629static struct softnet_data *softnet_get_online(loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004630{
Changli Gaodee42872010-05-02 05:42:16 +00004631 struct softnet_data *sd = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004633 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004634 if (cpu_online(*pos)) {
Changli Gaodee42872010-05-02 05:42:16 +00004635 sd = &per_cpu(softnet_data, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004636 break;
4637 } else
4638 ++*pos;
Changli Gaodee42872010-05-02 05:42:16 +00004639 return sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004640}
4641
4642static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4643{
4644 return softnet_get_online(pos);
4645}
4646
4647static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4648{
4649 ++*pos;
4650 return softnet_get_online(pos);
4651}
4652
4653static void softnet_seq_stop(struct seq_file *seq, void *v)
4654{
4655}
4656
4657static int softnet_seq_show(struct seq_file *seq, void *v)
4658{
Changli Gaodee42872010-05-02 05:42:16 +00004659 struct softnet_data *sd = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660
Tom Herbert0a9627f2010-03-16 08:03:29 +00004661 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Changli Gaodee42872010-05-02 05:42:16 +00004662 sd->processed, sd->dropped, sd->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07004663 0, 0, 0, 0, /* was fastroute */
Changli Gaodee42872010-05-02 05:42:16 +00004664 sd->cpu_collision, sd->received_rps);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665 return 0;
4666}
4667
Stephen Hemmingerf6908082007-03-12 14:34:29 -07004668static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669 .start = dev_seq_start,
4670 .next = dev_seq_next,
4671 .stop = dev_seq_stop,
4672 .show = dev_seq_show,
4673};
4674
4675static int dev_seq_open(struct inode *inode, struct file *file)
4676{
Denis V. Luneve372c412007-11-19 22:31:54 -08004677 return seq_open_net(inode, file, &dev_seq_ops,
Eric Dumazet2def16a2012-04-02 22:33:02 +00004678 sizeof(struct seq_net_private));
Anton Blanchard5cac98d2011-11-27 21:14:46 +00004679}
4680
Arjan van de Ven9a321442007-02-12 00:55:35 -08004681static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682 .owner = THIS_MODULE,
4683 .open = dev_seq_open,
4684 .read = seq_read,
4685 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08004686 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004687};
4688
Stephen Hemmingerf6908082007-03-12 14:34:29 -07004689static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004690 .start = softnet_seq_start,
4691 .next = softnet_seq_next,
4692 .stop = softnet_seq_stop,
4693 .show = softnet_seq_show,
4694};
4695
4696static int softnet_seq_open(struct inode *inode, struct file *file)
4697{
4698 return seq_open(file, &softnet_seq_ops);
4699}
4700
Arjan van de Ven9a321442007-02-12 00:55:35 -08004701static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004702 .owner = THIS_MODULE,
4703 .open = softnet_seq_open,
4704 .read = seq_read,
4705 .llseek = seq_lseek,
4706 .release = seq_release,
4707};
4708
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004709static void *ptype_get_idx(loff_t pos)
4710{
4711 struct packet_type *pt = NULL;
4712 loff_t i = 0;
4713 int t;
4714
4715 list_for_each_entry_rcu(pt, &ptype_all, list) {
4716 if (i == pos)
4717 return pt;
4718 ++i;
4719 }
4720
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004721 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004722 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4723 if (i == pos)
4724 return pt;
4725 ++i;
4726 }
4727 }
4728 return NULL;
4729}
4730
4731static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08004732 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004733{
4734 rcu_read_lock();
4735 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4736}
4737
4738static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4739{
4740 struct packet_type *pt;
4741 struct list_head *nxt;
4742 int hash;
4743
4744 ++*pos;
4745 if (v == SEQ_START_TOKEN)
4746 return ptype_get_idx(0);
4747
4748 pt = v;
4749 nxt = pt->list.next;
4750 if (pt->type == htons(ETH_P_ALL)) {
4751 if (nxt != &ptype_all)
4752 goto found;
4753 hash = 0;
4754 nxt = ptype_base[0].next;
4755 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004756 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004757
4758 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004759 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004760 return NULL;
4761 nxt = ptype_base[hash].next;
4762 }
4763found:
4764 return list_entry(nxt, struct packet_type, list);
4765}
4766
4767static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08004768 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004769{
4770 rcu_read_unlock();
4771}
4772
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004773static int ptype_seq_show(struct seq_file *seq, void *v)
4774{
4775 struct packet_type *pt = v;
4776
4777 if (v == SEQ_START_TOKEN)
4778 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004779 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004780 if (pt->type == htons(ETH_P_ALL))
4781 seq_puts(seq, "ALL ");
4782 else
4783 seq_printf(seq, "%04x", ntohs(pt->type));
4784
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08004785 seq_printf(seq, " %-8s %pF\n",
4786 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004787 }
4788
4789 return 0;
4790}
4791
4792static const struct seq_operations ptype_seq_ops = {
4793 .start = ptype_seq_start,
4794 .next = ptype_seq_next,
4795 .stop = ptype_seq_stop,
4796 .show = ptype_seq_show,
4797};
4798
4799static int ptype_seq_open(struct inode *inode, struct file *file)
4800{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07004801 return seq_open_net(inode, file, &ptype_seq_ops,
4802 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004803}
4804
4805static const struct file_operations ptype_seq_fops = {
4806 .owner = THIS_MODULE,
4807 .open = ptype_seq_open,
4808 .read = seq_read,
4809 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07004810 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004811};
4812
4813
Pavel Emelyanov46650792007-10-08 20:38:39 -07004814static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004815{
4816 int rc = -ENOMEM;
4817
Eric W. Biederman881d9662007-09-17 11:56:21 -07004818 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004819 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004820 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004821 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004822 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004823 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004824
Eric W. Biederman881d9662007-09-17 11:56:21 -07004825 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004826 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004827 rc = 0;
4828out:
4829 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004830out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004831 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004832out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004833 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004835 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004836 goto out;
4837}
Eric W. Biederman881d9662007-09-17 11:56:21 -07004838
Pavel Emelyanov46650792007-10-08 20:38:39 -07004839static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004840{
4841 wext_proc_exit(net);
4842
4843 proc_net_remove(net, "ptype");
4844 proc_net_remove(net, "softnet_stat");
4845 proc_net_remove(net, "dev");
4846}
4847
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004848static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004849 .init = dev_proc_net_init,
4850 .exit = dev_proc_net_exit,
4851};
4852
4853static int __init dev_proc_init(void)
4854{
4855 return register_pernet_subsys(&dev_proc_ops);
4856}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004857#else
4858#define dev_proc_init() 0
4859#endif /* CONFIG_PROC_FS */
4860
4861
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004862struct netdev_upper {
4863 struct net_device *dev;
4864 bool master;
4865 struct list_head list;
4866 struct rcu_head rcu;
4867 struct list_head search_list;
4868};
4869
4870static void __append_search_uppers(struct list_head *search_list,
4871 struct net_device *dev)
4872{
4873 struct netdev_upper *upper;
4874
4875 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4876 /* check if this upper is not already in search list */
4877 if (list_empty(&upper->search_list))
4878 list_add_tail(&upper->search_list, search_list);
4879 }
4880}
4881
4882static bool __netdev_search_upper_dev(struct net_device *dev,
4883 struct net_device *upper_dev)
4884{
4885 LIST_HEAD(search_list);
4886 struct netdev_upper *upper;
4887 struct netdev_upper *tmp;
4888 bool ret = false;
4889
4890 __append_search_uppers(&search_list, dev);
4891 list_for_each_entry(upper, &search_list, search_list) {
4892 if (upper->dev == upper_dev) {
4893 ret = true;
4894 break;
4895 }
4896 __append_search_uppers(&search_list, upper->dev);
4897 }
4898 list_for_each_entry_safe(upper, tmp, &search_list, search_list)
4899 INIT_LIST_HEAD(&upper->search_list);
4900 return ret;
4901}
4902
4903static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
4904 struct net_device *upper_dev)
4905{
4906 struct netdev_upper *upper;
4907
4908 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4909 if (upper->dev == upper_dev)
4910 return upper;
4911 }
4912 return NULL;
4913}
4914
4915/**
4916 * netdev_has_upper_dev - Check if device is linked to an upper device
4917 * @dev: device
4918 * @upper_dev: upper device to check
4919 *
4920 * Find out if a device is linked to the specified upper device and return true
4921 * in case it is. Note that this checks only immediate upper device,
4922 * not through a complete stack of devices. The caller must hold the RTNL lock.
4923 */
4924bool netdev_has_upper_dev(struct net_device *dev,
4925 struct net_device *upper_dev)
4926{
4927 ASSERT_RTNL();
4928
4929 return __netdev_find_upper(dev, upper_dev);
4930}
4931EXPORT_SYMBOL(netdev_has_upper_dev);
4932
4933/**
4934 * netdev_has_any_upper_dev - Check if device is linked to some device
4935 * @dev: device
4936 *
4937 * Find out if a device is linked to an upper device and return true in case
4938 * it is. The caller must hold the RTNL lock.
4939 */
4940bool netdev_has_any_upper_dev(struct net_device *dev)
4941{
4942 ASSERT_RTNL();
4943
4944 return !list_empty(&dev->upper_dev_list);
4945}
4946EXPORT_SYMBOL(netdev_has_any_upper_dev);
4947
4948/**
4949 * netdev_master_upper_dev_get - Get master upper device
4950 * @dev: device
4951 *
4952 * Find a master upper device and return a pointer to it, or NULL if
4953 * it is not there. The caller must hold the RTNL lock.
4954 */
4955struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4956{
4957 struct netdev_upper *upper;
4958
4959 ASSERT_RTNL();
4960
4961 if (list_empty(&dev->upper_dev_list))
4962 return NULL;
4963
4964 upper = list_first_entry(&dev->upper_dev_list,
4965 struct netdev_upper, list);
4966 if (likely(upper->master))
4967 return upper->dev;
4968 return NULL;
4969}
4970EXPORT_SYMBOL(netdev_master_upper_dev_get);
4971
4972/**
4973 * netdev_master_upper_dev_get_rcu - Get master upper device
4974 * @dev: device
4975 *
4976 * Find a master upper device and return a pointer to it, or NULL if
4977 * it is not there. The caller must hold the RCU read lock.
4978 */
4979struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4980{
4981 struct netdev_upper *upper;
4982
4983 upper = list_first_or_null_rcu(&dev->upper_dev_list,
4984 struct netdev_upper, list);
4985 if (upper && likely(upper->master))
4986 return upper->dev;
4987 return NULL;
4988}
4989EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4990
4991static int __netdev_upper_dev_link(struct net_device *dev,
4992 struct net_device *upper_dev, bool master)
4993{
4994 struct netdev_upper *upper;
4995
4996 ASSERT_RTNL();
4997
4998 if (dev == upper_dev)
4999 return -EBUSY;
5000
5001	/* To prevent loops, check that dev is not an upper device of upper_dev. */
5002 if (__netdev_search_upper_dev(upper_dev, dev))
5003 return -EBUSY;
5004
5005 if (__netdev_find_upper(dev, upper_dev))
5006 return -EEXIST;
5007
5008 if (master && netdev_master_upper_dev_get(dev))
5009 return -EBUSY;
5010
5011 upper = kmalloc(sizeof(*upper), GFP_KERNEL);
5012 if (!upper)
5013 return -ENOMEM;
5014
5015 upper->dev = upper_dev;
5016 upper->master = master;
5017 INIT_LIST_HEAD(&upper->search_list);
5018
5019	/* Ensure that the master upper link is always the first item in the list. */
5020 if (master)
5021 list_add_rcu(&upper->list, &dev->upper_dev_list);
5022 else
5023 list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
5024 dev_hold(upper_dev);
5025
5026 return 0;
5027}
5028
5029/**
5030 * netdev_upper_dev_link - Add a link to the upper device
5031 * @dev: device
5032 * @upper_dev: new upper device
5033 *
5034 * Adds a link to a device which is upper to this one. The caller must hold
5035 * the RTNL lock. On a failure a negative errno code is returned.
5036 * On success the reference counts are adjusted and the function
5037 * returns zero.
5038 */
5039int netdev_upper_dev_link(struct net_device *dev,
5040 struct net_device *upper_dev)
5041{
5042 return __netdev_upper_dev_link(dev, upper_dev, false);
5043}
5044EXPORT_SYMBOL(netdev_upper_dev_link);
5045
5046/**
5047 * netdev_master_upper_dev_link - Add a master link to the upper device
5048 * @dev: device
5049 * @upper_dev: new upper device
5050 *
5051 * Adds a link to a device which is upper to this one. In this case, only
5052 * one master upper device can be linked, although other non-master devices
5053 * might be linked as well. The caller must hold the RTNL lock.
5054 * On a failure a negative errno code is returned. On success the reference
5055 * counts are adjusted and the function returns zero.
5056 */
5057int netdev_master_upper_dev_link(struct net_device *dev,
5058 struct net_device *upper_dev)
5059{
5060 return __netdev_upper_dev_link(dev, upper_dev, true);
5061}
5062EXPORT_SYMBOL(netdev_master_upper_dev_link);
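/* Editor's note: hedged sketch of how a bonding/bridge-style driver might
 * use the upper-device API when enslaving and releasing a port; error
 * handling and the surrounding driver state are omitted:
 *
 *	ASSERT_RTNL();
 *	err = netdev_master_upper_dev_link(port_dev, master_dev);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(port_dev, master_dev);
 */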
5063
5064/**
5065 * netdev_upper_dev_unlink - Removes a link to upper device
5066 * @dev: device
5067 * @upper_dev: upper device to remove the link to
5068 *
5069 * Removes a link to a device which is upper to this one. The caller must hold
5070 * the RTNL lock.
5071 */
5072void netdev_upper_dev_unlink(struct net_device *dev,
5073 struct net_device *upper_dev)
5074{
5075 struct netdev_upper *upper;
5076
5077 ASSERT_RTNL();
5078
5079 upper = __netdev_find_upper(dev, upper_dev);
5080 if (!upper)
5081 return;
5082 list_del_rcu(&upper->list);
5083 dev_put(upper_dev);
5084 kfree_rcu(upper, rcu);
5085}
5086EXPORT_SYMBOL(netdev_upper_dev_unlink);
5087
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005088static void dev_change_rx_flags(struct net_device *dev, int flags)
5089{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005090 const struct net_device_ops *ops = dev->netdev_ops;
5091
5092 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
5093 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005094}
5095
Wang Chendad9b332008-06-18 01:48:28 -07005096static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07005097{
Eric Dumazetb536db92011-11-30 21:42:26 +00005098 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005099 kuid_t uid;
5100 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07005101
Patrick McHardy24023452007-07-14 18:51:31 -07005102 ASSERT_RTNL();
5103
Wang Chendad9b332008-06-18 01:48:28 -07005104 dev->flags |= IFF_PROMISC;
5105 dev->promiscuity += inc;
5106 if (dev->promiscuity == 0) {
5107 /*
5108 * Avoid overflow.
5109 * If inc causes overflow, untouch promisc and return error.
5110 */
5111 if (inc < 0)
5112 dev->flags &= ~IFF_PROMISC;
5113 else {
5114 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005115 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5116 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005117 return -EOVERFLOW;
5118 }
5119 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005120 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005121 pr_info("device %s %s promiscuous mode\n",
5122 dev->name,
5123 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11005124 if (audit_enabled) {
5125 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005126 audit_log(current->audit_context, GFP_ATOMIC,
5127 AUDIT_ANOM_PROMISCUOUS,
5128 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5129 dev->name, (dev->flags & IFF_PROMISC),
5130 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07005131 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005132 from_kuid(&init_user_ns, uid),
5133 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005134 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11005135 }
Patrick McHardy24023452007-07-14 18:51:31 -07005136
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005137 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07005138 }
Wang Chendad9b332008-06-18 01:48:28 -07005139 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005140}
5141
Linus Torvalds1da177e2005-04-16 15:20:36 -07005142/**
5143 * dev_set_promiscuity - update promiscuity count on a device
5144 * @dev: device
5145 * @inc: modifier
5146 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07005147 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148 * remains above zero the interface remains promiscuous. Once it hits zero
5149 * the device reverts back to normal filtering operation. A negative inc
5150 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07005151 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005152 */
Wang Chendad9b332008-06-18 01:48:28 -07005153int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154{
Eric Dumazetb536db92011-11-30 21:42:26 +00005155 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07005156 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157
Wang Chendad9b332008-06-18 01:48:28 -07005158 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07005159 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07005160 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07005161 if (dev->flags != old_flags)
5162 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07005163 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005165EXPORT_SYMBOL(dev_set_promiscuity);
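/* Editor's note: illustrative usage under RTNL, e.g. from a packet-capture
 * style feature; each increment must eventually be balanced by a
 * decrement or the device stays promiscuous:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	(enter promiscuous mode)
 *	...
 *	dev_set_promiscuity(dev, -1);		(leave it again)
 *	rtnl_unlock();
 */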
Linus Torvalds1da177e2005-04-16 15:20:36 -07005166
5167/**
5168 * dev_set_allmulti - update allmulti count on a device
5169 * @dev: device
5170 * @inc: modifier
5171 *
5172 * Add or remove reception of all multicast frames to a device. While the
5173 * count in the device remains above zero the interface keeps receiving
5174 * all multicast frames. Once it hits zero the device reverts back to normal
5175 * filtering operation. A negative @inc value is used to drop the counter
5176 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07005177 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178 */
5179
Wang Chendad9b332008-06-18 01:48:28 -07005180int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005181{
Eric Dumazetb536db92011-11-30 21:42:26 +00005182 unsigned int old_flags = dev->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183
Patrick McHardy24023452007-07-14 18:51:31 -07005184 ASSERT_RTNL();
5185
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07005187 dev->allmulti += inc;
5188 if (dev->allmulti == 0) {
5189 /*
5190 * Avoid overflow.
5191 * If inc causes overflow, untouch allmulti and return error.
5192 */
5193 if (inc < 0)
5194 dev->flags &= ~IFF_ALLMULTI;
5195 else {
5196 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005197			pr_warn("%s: allmulti counter overflowed; set allmulti failed. All-multicast mode on this device may be unreliable.\n",
5198				dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005199 return -EOVERFLOW;
5200 }
5201 }
Patrick McHardy24023452007-07-14 18:51:31 -07005202 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005203 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07005204 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07005205 }
Wang Chendad9b332008-06-18 01:48:28 -07005206 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005207}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005208EXPORT_SYMBOL(dev_set_allmulti);
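
/*
 * Illustrative sketch, not part of the original file: a driver that must
 * see every multicast frame while some resource is active pairs +1/-1
 * calls, mirroring the promiscuity example above. Names are hypothetical.
 */
static int example_mc_monitor_enable(struct net_device *dev)
{
	return dev_set_allmulti(dev, 1);	/* may fail with -EOVERFLOW */
}

static void example_mc_monitor_disable(struct net_device *dev)
{
	dev_set_allmulti(dev, -1);
}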
Patrick McHardy4417da62007-06-27 01:28:10 -07005209
5210/*
5211 * Upload unicast and multicast address lists to device and
5212 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08005213 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07005214 * are present.
5215 */
5216void __dev_set_rx_mode(struct net_device *dev)
5217{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005218 const struct net_device_ops *ops = dev->netdev_ops;
5219
Patrick McHardy4417da62007-06-27 01:28:10 -07005220 /* dev_open will call this function so the list will stay sane. */
5221	if (!(dev->flags & IFF_UP))
5222 return;
5223
5224 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09005225 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07005226
Jiri Pirko01789342011-08-16 06:29:00 +00005227 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005228		/* Unicast address changes may only happen under the rtnl,
5229 * therefore calling __dev_set_promiscuity here is safe.
5230 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005231 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005232 __dev_set_promiscuity(dev, 1);
Joe Perches2d348d12011-07-25 16:17:35 -07005233 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005234 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005235 __dev_set_promiscuity(dev, -1);
Joe Perches2d348d12011-07-25 16:17:35 -07005236 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07005237 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005238 }
Jiri Pirko01789342011-08-16 06:29:00 +00005239
5240 if (ops->ndo_set_rx_mode)
5241 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005242}
5243
5244void dev_set_rx_mode(struct net_device *dev)
5245{
David S. Millerb9e40852008-07-15 00:15:08 -07005246 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005247 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07005248 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249}
5250
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005251/**
5252 * dev_get_flags - get flags reported to userspace
5253 * @dev: device
5254 *
5255 * Get the combination of flag bits exported through APIs to userspace.
5256 */
Eric Dumazet95c96172012-04-15 05:58:06 +00005257unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258{
Eric Dumazet95c96172012-04-15 05:58:06 +00005259 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005260
5261 flags = (dev->flags & ~(IFF_PROMISC |
5262 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08005263 IFF_RUNNING |
5264 IFF_LOWER_UP |
5265 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07005266 (dev->gflags & (IFF_PROMISC |
5267 IFF_ALLMULTI));
5268
Stefan Rompfb00055a2006-03-20 17:09:11 -08005269 if (netif_running(dev)) {
5270 if (netif_oper_up(dev))
5271 flags |= IFF_RUNNING;
5272 if (netif_carrier_ok(dev))
5273 flags |= IFF_LOWER_UP;
5274 if (netif_dormant(dev))
5275 flags |= IFF_DORMANT;
5276 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005277
5278 return flags;
5279}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005280EXPORT_SYMBOL(dev_get_flags);
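
/*
 * Illustrative sketch, not part of the original file: because
 * dev_get_flags() folds the operstate-derived bits (IFF_RUNNING,
 * IFF_LOWER_UP, IFF_DORMANT) into the reported value, a liveness check
 * can be written against it directly. The function name is hypothetical.
 */
static bool example_iface_is_usable(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}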
Linus Torvalds1da177e2005-04-16 15:20:36 -07005281
Patrick McHardybd380812010-02-26 06:34:53 +00005282int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005283{
Eric Dumazetb536db92011-11-30 21:42:26 +00005284 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005285 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286
Patrick McHardy24023452007-07-14 18:51:31 -07005287 ASSERT_RTNL();
5288
Linus Torvalds1da177e2005-04-16 15:20:36 -07005289 /*
5290 * Set the flags on our device.
5291 */
5292
5293 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5294 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5295 IFF_AUTOMEDIA)) |
5296 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5297 IFF_ALLMULTI));
5298
5299 /*
5300	 * Load in the correct multicast list now that the flags have changed.
5301 */
5302
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005303 if ((old_flags ^ flags) & IFF_MULTICAST)
5304 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07005305
Patrick McHardy4417da62007-06-27 01:28:10 -07005306 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005307
5308 /*
5309	 * Have we downed the interface? We handle IFF_UP ourselves
5310 * according to user attempts to set it, rather than blindly
5311 * setting it.
5312 */
5313
5314 ret = 0;
5315 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00005316 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005317
5318 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07005319 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005320 }
5321
Linus Torvalds1da177e2005-04-16 15:20:36 -07005322 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005323 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5324
Linus Torvalds1da177e2005-04-16 15:20:36 -07005325 dev->gflags ^= IFF_PROMISC;
5326 dev_set_promiscuity(dev, inc);
5327 }
5328
5329	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5330	   is important. Some (broken) drivers set IFF_PROMISC when
5331	   IFF_ALLMULTI is requested, without asking us and without reporting it.
5332 */
5333 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005334 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5335
Linus Torvalds1da177e2005-04-16 15:20:36 -07005336 dev->gflags ^= IFF_ALLMULTI;
5337 dev_set_allmulti(dev, inc);
5338 }
5339
Patrick McHardybd380812010-02-26 06:34:53 +00005340 return ret;
5341}
5342
5343void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
5344{
5345 unsigned int changes = dev->flags ^ old_flags;
5346
5347 if (changes & IFF_UP) {
5348 if (dev->flags & IFF_UP)
5349 call_netdevice_notifiers(NETDEV_UP, dev);
5350 else
5351 call_netdevice_notifiers(NETDEV_DOWN, dev);
5352 }
5353
5354 if (dev->flags & IFF_UP &&
5355 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
5356 call_netdevice_notifiers(NETDEV_CHANGE, dev);
5357}
5358
5359/**
5360 * dev_change_flags - change device settings
5361 * @dev: device
5362 * @flags: device state flags
5363 *
5364 * Change settings on a device based on the given state flags. The
5365 * flags are in the userspace-exported format.
5366 */
Eric Dumazetb536db92011-11-30 21:42:26 +00005367int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00005368{
Eric Dumazetb536db92011-11-30 21:42:26 +00005369 int ret;
5370 unsigned int changes, old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005371
5372 ret = __dev_change_flags(dev, flags);
5373 if (ret < 0)
5374 return ret;
5375
5376 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07005377 if (changes)
5378 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005379
Patrick McHardybd380812010-02-26 06:34:53 +00005380 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005381 return ret;
5382}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005383EXPORT_SYMBOL(dev_change_flags);
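
/*
 * Illustrative sketch, not part of the original file: bringing an interface
 * administratively up through the flags interface. dev_change_flags() must
 * run under RTNL; it performs the __dev_open() and emits the rtnetlink
 * notification itself. The function name is hypothetical.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}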
Linus Torvalds1da177e2005-04-16 15:20:36 -07005384
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005385/**
5386 * dev_set_mtu - Change maximum transfer unit
5387 * @dev: device
5388 * @new_mtu: new transfer unit
5389 *
5390 * Change the maximum transfer size of the network device.
5391 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005392int dev_set_mtu(struct net_device *dev, int new_mtu)
5393{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005394 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005395 int err;
5396
5397 if (new_mtu == dev->mtu)
5398 return 0;
5399
5400	/* MTU must not be negative. */
5401 if (new_mtu < 0)
5402 return -EINVAL;
5403
5404 if (!netif_device_present(dev))
5405 return -ENODEV;
5406
5407 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005408 if (ops->ndo_change_mtu)
5409 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005410 else
5411 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005412
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00005413 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005414 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005415 return err;
5416}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005417EXPORT_SYMBOL(dev_set_mtu);
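
/*
 * Illustrative sketch, not part of the original file: an in-kernel MTU
 * change. As with the flags path above, this is normally done under RTNL;
 * NETDEV_CHANGEMTU is sent on success. The function name and the 9000-byte
 * jumbo value are hypothetical.
 */
static int example_set_jumbo_mtu(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	return err;
}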
Linus Torvalds1da177e2005-04-16 15:20:36 -07005418
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005419/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005420 * dev_set_group - Change the group this device belongs to
5421 * @dev: device
5422 * @new_group: group this device should belong to
5423 */
5424void dev_set_group(struct net_device *dev, int new_group)
5425{
5426 dev->group = new_group;
5427}
5428EXPORT_SYMBOL(dev_set_group);
5429
5430/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005431 * dev_set_mac_address - Change Media Access Control Address
5432 * @dev: device
5433 * @sa: new address
5434 *
5435 * Change the hardware (MAC) address of the device
5436 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005437int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5438{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005439 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440 int err;
5441
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005442 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005443 return -EOPNOTSUPP;
5444 if (sa->sa_family != dev->type)
5445 return -EINVAL;
5446 if (!netif_device_present(dev))
5447 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005448 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00005449 if (err)
5450 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00005451 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00005452 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005453 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00005454 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005455}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005456EXPORT_SYMBOL(dev_set_mac_address);
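
/*
 * Illustrative sketch, not part of the original file: programming a new
 * hardware address. sa_family must match dev->type (e.g. ARPHRD_ETHER) and
 * sa_data must carry dev->addr_len bytes. The function name is
 * hypothetical; the caller is assumed to hold RTNL.
 */
static int example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}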
Linus Torvalds1da177e2005-04-16 15:20:36 -07005457
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005458/**
5459 * dev_change_carrier - Change device carrier
5460 * @dev: device
5461 * @new_carrier: new value
5462 *
5463 * Change device carrier
5464 */
5465int dev_change_carrier(struct net_device *dev, bool new_carrier)
5466{
5467 const struct net_device_ops *ops = dev->netdev_ops;
5468
5469 if (!ops->ndo_change_carrier)
5470 return -EOPNOTSUPP;
5471 if (!netif_device_present(dev))
5472 return -ENODEV;
5473 return ops->ndo_change_carrier(dev, new_carrier);
5474}
5475EXPORT_SYMBOL(dev_change_carrier);
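
/*
 * Illustrative sketch, not part of the original file: a driver opts in to
 * dev_change_carrier() by providing ndo_change_carrier and toggling the
 * software carrier state. The callback name is hypothetical.
 */
static int example_ndo_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}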
5476
Linus Torvalds1da177e2005-04-16 15:20:36 -07005477/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00005478 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07005479 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07005480static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005481{
5482 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00005483 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005484
5485 if (!dev)
5486 return -ENODEV;
5487
5488 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005489 case SIOCGIFFLAGS: /* Get interface flags */
5490 ifr->ifr_flags = (short) dev_get_flags(dev);
5491 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005492
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005493 case SIOCGIFMETRIC: /* Get the metric on the interface
5494 (currently unused) */
5495 ifr->ifr_metric = 0;
5496 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005497
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005498 case SIOCGIFMTU: /* Get the MTU of a device */
5499 ifr->ifr_mtu = dev->mtu;
5500 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005501
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005502 case SIOCGIFHWADDR:
5503 if (!dev->addr_len)
5504 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
5505 else
5506 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
5507 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5508 ifr->ifr_hwaddr.sa_family = dev->type;
5509 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005510
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005511 case SIOCGIFSLAVE:
5512 err = -EINVAL;
5513 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005514
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005515 case SIOCGIFMAP:
5516 ifr->ifr_map.mem_start = dev->mem_start;
5517 ifr->ifr_map.mem_end = dev->mem_end;
5518 ifr->ifr_map.base_addr = dev->base_addr;
5519 ifr->ifr_map.irq = dev->irq;
5520 ifr->ifr_map.dma = dev->dma;
5521 ifr->ifr_map.port = dev->if_port;
5522 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005523
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005524 case SIOCGIFINDEX:
5525 ifr->ifr_ifindex = dev->ifindex;
5526 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005527
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005528 case SIOCGIFTXQLEN:
5529 ifr->ifr_qlen = dev->tx_queue_len;
5530 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005531
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005532 default:
5533 /* dev_ioctl() should ensure this case
5534 * is never reached
5535 */
5536 WARN_ON(1);
Lifeng Sun41c31f32011-04-27 22:04:51 +00005537 err = -ENOTTY;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005538 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005539
5540 }
5541 return err;
5542}
5543
5544/*
5545 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
5546 */
5547static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
5548{
5549 int err;
5550 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08005551 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005552
5553 if (!dev)
5554 return -ENODEV;
5555
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08005556 ops = dev->netdev_ops;
5557
Jeff Garzik14e3e072007-10-08 00:06:32 -07005558 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005559 case SIOCSIFFLAGS: /* Set interface flags */
5560 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07005561
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005562 case SIOCSIFMETRIC: /* Set the metric on the interface
5563 (currently unused) */
5564 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005565
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005566 case SIOCSIFMTU: /* Set the MTU of a device */
5567 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07005568
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005569 case SIOCSIFHWADDR:
5570 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005571
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005572 case SIOCSIFHWBROADCAST:
5573 if (ifr->ifr_hwaddr.sa_family != dev->type)
5574 return -EINVAL;
5575 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
5576 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5577 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5578 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005579
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005580 case SIOCSIFMAP:
5581 if (ops->ndo_set_config) {
5582 if (!netif_device_present(dev))
5583 return -ENODEV;
5584 return ops->ndo_set_config(dev, &ifr->ifr_map);
5585 }
5586 return -EOPNOTSUPP;
5587
5588 case SIOCADDMULTI:
Jiri Pirkob81693d2011-08-16 06:29:02 +00005589 if (!ops->ndo_set_rx_mode ||
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005590 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5591 return -EINVAL;
5592 if (!netif_device_present(dev))
5593 return -ENODEV;
Jiri Pirko22bedad32010-04-01 21:22:57 +00005594 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005595
5596 case SIOCDELMULTI:
Jiri Pirkob81693d2011-08-16 06:29:02 +00005597 if (!ops->ndo_set_rx_mode ||
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005598 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5599 return -EINVAL;
5600 if (!netif_device_present(dev))
5601 return -ENODEV;
Jiri Pirko22bedad32010-04-01 21:22:57 +00005602 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005603
5604 case SIOCSIFTXQLEN:
5605 if (ifr->ifr_qlen < 0)
5606 return -EINVAL;
5607 dev->tx_queue_len = ifr->ifr_qlen;
5608 return 0;
5609
5610 case SIOCSIFNAME:
5611 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5612 return dev_change_name(dev, ifr->ifr_newname);
5613
Richard Cochran4dc360c2011-10-19 17:00:35 -04005614 case SIOCSHWTSTAMP:
5615 err = net_hwtstamp_validate(ifr);
5616 if (err)
5617 return err;
5618 /* fall through */
5619
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005620 /*
5621 * Unknown or private ioctl
5622 */
5623 default:
5624 if ((cmd >= SIOCDEVPRIVATE &&
5625 cmd <= SIOCDEVPRIVATE + 15) ||
5626 cmd == SIOCBONDENSLAVE ||
5627 cmd == SIOCBONDRELEASE ||
5628 cmd == SIOCBONDSETHWADDR ||
5629 cmd == SIOCBONDSLAVEINFOQUERY ||
5630 cmd == SIOCBONDINFOQUERY ||
5631 cmd == SIOCBONDCHANGEACTIVE ||
5632 cmd == SIOCGMIIPHY ||
5633 cmd == SIOCGMIIREG ||
5634 cmd == SIOCSMIIREG ||
5635 cmd == SIOCBRADDIF ||
5636 cmd == SIOCBRDELIF ||
5637 cmd == SIOCSHWTSTAMP ||
5638 cmd == SIOCWANDEV) {
5639 err = -EOPNOTSUPP;
5640 if (ops->ndo_do_ioctl) {
5641 if (netif_device_present(dev))
5642 err = ops->ndo_do_ioctl(dev, ifr, cmd);
5643 else
5644 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005645 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005646 } else
5647 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005648
5649 }
5650 return err;
5651}
5652
5653/*
5654 * This function handles all "interface"-type I/O control requests. The actual
5655 * 'doing' part of this is dev_ifsioc above.
5656 */
5657
5658/**
5659 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005660 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005661 * @cmd: command to issue
5662 * @arg: pointer to a struct ifreq in user space
5663 *
5664 * Issue ioctl functions to devices. This is normally called by the
5665 * user space syscall interfaces but can sometimes be useful for
5666 * other purposes. The return value is the non-negative result returned
5667 * to the syscall on success, or a negative errno code on error.
5668 */
5669
Eric W. Biederman881d9662007-09-17 11:56:21 -07005670int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671{
5672 struct ifreq ifr;
5673 int ret;
5674 char *colon;
5675
5676	/* One special case: SIOCGIFCONF takes an ifconf argument
5677	   and requires the shared lock, because it sleeps while
5678	   writing to user space.
5679 */
5680
5681 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005682 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005683 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005684 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005685 return ret;
5686 }
5687 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005688 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005689
5690 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5691 return -EFAULT;
5692
5693 ifr.ifr_name[IFNAMSIZ-1] = 0;
5694
5695 colon = strchr(ifr.ifr_name, ':');
5696 if (colon)
5697 *colon = 0;
5698
5699 /*
5700 * See which interface the caller is talking about.
5701 */
5702
5703 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005704 /*
5705 * These ioctl calls:
5706 * - can be done by all.
5707 * - atomic and do not require locking.
5708 * - return a value
5709 */
5710 case SIOCGIFFLAGS:
5711 case SIOCGIFMETRIC:
5712 case SIOCGIFMTU:
5713 case SIOCGIFHWADDR:
5714 case SIOCGIFSLAVE:
5715 case SIOCGIFMAP:
5716 case SIOCGIFINDEX:
5717 case SIOCGIFTXQLEN:
5718 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00005719 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005720 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00005721 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005722 if (!ret) {
5723 if (colon)
5724 *colon = ':';
5725 if (copy_to_user(arg, &ifr,
5726 sizeof(struct ifreq)))
5727 ret = -EFAULT;
5728 }
5729 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005730
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005731 case SIOCETHTOOL:
5732 dev_load(net, ifr.ifr_name);
5733 rtnl_lock();
5734 ret = dev_ethtool(net, &ifr);
5735 rtnl_unlock();
5736 if (!ret) {
5737 if (colon)
5738 *colon = ':';
5739 if (copy_to_user(arg, &ifr,
5740 sizeof(struct ifreq)))
5741 ret = -EFAULT;
5742 }
5743 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005744
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005745 /*
5746 * These ioctl calls:
5747 * - require superuser power.
5748 * - require strict serialization.
5749 * - return a value
5750 */
5751 case SIOCGMIIPHY:
5752 case SIOCGMIIREG:
5753 case SIOCSIFNAME:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005754 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005755 return -EPERM;
5756 dev_load(net, ifr.ifr_name);
5757 rtnl_lock();
5758 ret = dev_ifsioc(net, &ifr, cmd);
5759 rtnl_unlock();
5760 if (!ret) {
5761 if (colon)
5762 *colon = ':';
5763 if (copy_to_user(arg, &ifr,
5764 sizeof(struct ifreq)))
5765 ret = -EFAULT;
5766 }
5767 return ret;
5768
5769 /*
5770 * These ioctl calls:
5771 * - require superuser power.
5772 * - require strict serialization.
5773 * - do not return a value
5774 */
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005775 case SIOCSIFMAP:
5776 case SIOCSIFTXQLEN:
5777 if (!capable(CAP_NET_ADMIN))
5778 return -EPERM;
5779 /* fall through */
5780 /*
5781 * These ioctl calls:
5782 * - require local superuser power.
5783 * - require strict serialization.
5784 * - do not return a value
5785 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005786 case SIOCSIFFLAGS:
5787 case SIOCSIFMETRIC:
5788 case SIOCSIFMTU:
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005789 case SIOCSIFHWADDR:
5790 case SIOCSIFSLAVE:
5791 case SIOCADDMULTI:
5792 case SIOCDELMULTI:
5793 case SIOCSIFHWBROADCAST:
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005794 case SIOCSMIIREG:
5795 case SIOCBONDENSLAVE:
5796 case SIOCBONDRELEASE:
5797 case SIOCBONDSETHWADDR:
5798 case SIOCBONDCHANGEACTIVE:
5799 case SIOCBRADDIF:
5800 case SIOCBRDELIF:
5801 case SIOCSHWTSTAMP:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005802 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005803 return -EPERM;
5804 /* fall through */
5805 case SIOCBONDSLAVEINFOQUERY:
5806 case SIOCBONDINFOQUERY:
5807 dev_load(net, ifr.ifr_name);
5808 rtnl_lock();
5809 ret = dev_ifsioc(net, &ifr, cmd);
5810 rtnl_unlock();
5811 return ret;
5812
5813 case SIOCGIFMEM:
5814 /* Get the per device memory space. We can add this but
5815 * currently do not support it */
5816 case SIOCSIFMEM:
5817 /* Set the per device memory buffer space.
5818 * Not applicable in our case */
5819 case SIOCSIFLINK:
Lifeng Sun41c31f32011-04-27 22:04:51 +00005820 return -ENOTTY;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005821
5822 /*
5823 * Unknown or private ioctl.
5824 */
5825 default:
5826 if (cmd == SIOCWANDEV ||
5827 (cmd >= SIOCDEVPRIVATE &&
5828 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005829 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005830 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005831 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005832 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005833 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005834 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005835 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005836 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005837 }
5838 /* Take care of Wireless Extensions */
5839 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5840 return wext_handle_ioctl(net, &ifr, cmd, arg);
Lifeng Sun41c31f32011-04-27 22:04:51 +00005841 return -ENOTTY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005842 }
5843}
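
/*
 * Illustrative sketch, not part of the original file: the userspace side of
 * one of the ioctls dispatched above. Reading the MTU via SIOCGIFMTU needs
 * only an AF_INET datagram socket as a handle ("eth0" is an example name):
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	if (fd >= 0 && ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu=%d\n", ifr.ifr_mtu);
 */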
5844
5845
5846/**
5847 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005848 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005849 *
5850 * Returns a suitable unique value for a new device interface
5851 * number. The caller must hold the rtnl semaphore or the
5852 * dev_base_lock to be sure it remains unique.
5853 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005854static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005855{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005856 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005857 for (;;) {
5858 if (++ifindex <= 0)
5859 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005860 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005861 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005862 }
5863}
5864
Linus Torvalds1da177e2005-04-16 15:20:36 -07005865/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005866static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005867
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005868static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005869{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005870 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005871}
5872
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005873static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005874{
Krishna Kumare93737b2009-12-08 22:26:02 +00005875 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005876
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005877 BUG_ON(dev_boot_phase);
5878 ASSERT_RTNL();
5879
Krishna Kumare93737b2009-12-08 22:26:02 +00005880 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005881		/* Some devices call us without ever having registered, as
Krishna Kumare93737b2009-12-08 22:26:02 +00005882		 * part of initialization unwind. Remove those
5883		 * devices and proceed with the remaining ones.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005884 */
5885 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005886 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5887 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005888
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005889 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00005890 list_del(&dev->unreg_list);
5891 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005892 }
Eric Dumazet449f4542011-05-19 12:24:16 +00005893 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005894 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00005895 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005896
Octavian Purdila44345722010-12-13 12:44:07 +00005897 /* If device is running, close it first. */
5898 dev_close_many(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005899
Octavian Purdila44345722010-12-13 12:44:07 +00005900 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005901 /* And unlink it from device chain. */
5902 unlist_netdevice(dev);
5903
5904 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005905 }
5906
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005907 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005908
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005909 list_for_each_entry(dev, head, unreg_list) {
5910 /* Shutdown queueing discipline. */
5911 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005912
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005913
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005914		/* Notify protocols that we are about to destroy
5915		   this device. They should clean up all their state.
5916 */
5917 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5918
Patrick McHardya2835762010-02-26 06:34:51 +00005919 if (!dev->rtnl_link_ops ||
5920 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5921 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5922
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005923 /*
5924 * Flush the unicast and multicast chains
5925 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005926 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005927 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005928
5929 if (dev->netdev_ops->ndo_uninit)
5930 dev->netdev_ops->ndo_uninit(dev);
5931
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005932 /* Notifier chain MUST detach us all upper devices. */
5933 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005934
5935 /* Remove entries from kobject tree */
5936 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00005937#ifdef CONFIG_XPS
5938 /* Remove XPS queueing entries */
5939 netif_reset_xps_queues_gt(dev, 0);
5940#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005941 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005942
Eric W. Biederman850a5452011-10-13 22:25:23 +00005943 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005944
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005945 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005946 dev_put(dev);
5947}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005948
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005949static void rollback_registered(struct net_device *dev)
5950{
5951 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005952
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005953 list_add(&dev->unreg_list, &single);
5954 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00005955 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005956}
5957
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005958static netdev_features_t netdev_fix_features(struct net_device *dev,
5959 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07005960{
Michał Mirosław57422dc2011-01-22 12:14:12 +00005961 /* Fix illegal checksum combinations */
5962 if ((features & NETIF_F_HW_CSUM) &&
5963 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005964 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00005965 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5966 }
5967
Herbert Xub63365a2008-10-23 01:11:29 -07005968 /* Fix illegal SG+CSUM combinations. */
5969 if ((features & NETIF_F_SG) &&
5970 !(features & NETIF_F_ALL_CSUM)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005971 netdev_dbg(dev,
5972 "Dropping NETIF_F_SG since no checksum feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005973 features &= ~NETIF_F_SG;
5974 }
5975
5976 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005977 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005978 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005979 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07005980 }
5981
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005982 /* TSO ECN requires that TSO is present as well. */
5983 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5984 features &= ~NETIF_F_TSO_ECN;
5985
Michał Mirosław212b5732011-02-15 16:59:16 +00005986 /* Software GSO depends on SG. */
5987 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005988 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005989 features &= ~NETIF_F_GSO;
5990 }
5991
Michał Mirosławacd11302011-01-24 15:45:15 -08005992 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005993 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005994 /* maybe split UFO into V4 and V6? */
5995 if (!((features & NETIF_F_GEN_CSUM) ||
5996 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5997 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005998 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005999 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07006000 features &= ~NETIF_F_UFO;
6001 }
6002
6003 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04006004 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08006005 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07006006 features &= ~NETIF_F_UFO;
6007 }
6008 }
6009
6010 return features;
6011}
Herbert Xub63365a2008-10-23 01:11:29 -07006012
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006013int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00006014{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006015 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00006016 int err = 0;
6017
Michał Mirosław87267482011-04-12 09:56:38 +00006018 ASSERT_RTNL();
6019
Michał Mirosław5455c692011-02-15 16:59:17 +00006020 features = netdev_get_wanted_features(dev);
6021
6022 if (dev->netdev_ops->ndo_fix_features)
6023 features = dev->netdev_ops->ndo_fix_features(dev, features);
6024
6025 /* driver might be less strict about feature dependencies */
6026 features = netdev_fix_features(dev, features);
6027
6028 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006029 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00006030
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006031 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6032 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00006033
6034 if (dev->netdev_ops->ndo_set_features)
6035 err = dev->netdev_ops->ndo_set_features(dev, features);
6036
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006037 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00006038 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006039 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6040 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006041 return -1;
6042 }
6043
6044 if (!err)
6045 dev->features = features;
6046
6047 return 1;
6048}
6049
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006050/**
6051 * netdev_update_features - recalculate device features
6052 * @dev: the device to check
6053 *
6054 *	Recalculate the dev->features set and send notifications if it
6055 *	has changed. Should be called whenever driver- or hardware-dependent
6056 *	conditions that influence the features might have changed.
6057 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006058void netdev_update_features(struct net_device *dev)
6059{
6060 if (__netdev_update_features(dev))
6061 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00006062}
6063EXPORT_SYMBOL(netdev_update_features);
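
/*
 * Illustrative sketch, not part of the original file: a driver that has
 * just learned of a feature-affecting condition (here a made-up firmware
 * capability bit in a hypothetical private struct) re-runs feature
 * negotiation under RTNL.
 */
struct example_priv {
	bool hw_csum_ok;	/* hypothetical capability flag */
};

static void example_fw_event(struct net_device *dev, bool hw_csum_ok)
{
	struct example_priv *priv = netdev_priv(dev);

	rtnl_lock();
	priv->hw_csum_ok = hw_csum_ok;
	if (hw_csum_ok)
		dev->hw_features |= NETIF_F_HW_CSUM;
	else
		dev->hw_features &= ~NETIF_F_HW_CSUM;
	netdev_update_features(dev);	/* recalculate and notify */
	rtnl_unlock();
}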
6064
Linus Torvalds1da177e2005-04-16 15:20:36 -07006065/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006066 * netdev_change_features - recalculate device features
6067 * @dev: the device to check
6068 *
6069 *	Recalculate the dev->features set and send notifications even
6070 *	if the features have not changed. Should be called instead of
6071 *	netdev_update_features() if dev->vlan_features might also have
6072 *	changed, to allow the changes to be propagated to stacked
6073 *	VLAN devices.
6074 */
6075void netdev_change_features(struct net_device *dev)
6076{
6077 __netdev_update_features(dev);
6078 netdev_features_change(dev);
6079}
6080EXPORT_SYMBOL(netdev_change_features);
6081
6082/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006083 * netif_stacked_transfer_operstate - transfer operstate
6084 * @rootdev: the root or lower level device to transfer state from
6085 * @dev: the device to transfer operstate to
6086 *
6087 * Transfer operational state from root to device. This is normally
6088 * called when a stacking relationship exists between the root
6089 *	device and the device (a leaf device).
6090 */
6091void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6092 struct net_device *dev)
6093{
6094 if (rootdev->operstate == IF_OPER_DORMANT)
6095 netif_dormant_on(dev);
6096 else
6097 netif_dormant_off(dev);
6098
6099 if (netif_carrier_ok(rootdev)) {
6100 if (!netif_carrier_ok(dev))
6101 netif_carrier_on(dev);
6102 } else {
6103 if (netif_carrier_ok(dev))
6104 netif_carrier_off(dev);
6105 }
6106}
6107EXPORT_SYMBOL(netif_stacked_transfer_operstate);
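
/*
 * Illustrative sketch, not part of the original file: a VLAN-like stacked
 * driver would typically call this from its NETDEV_CHANGE notifier so the
 * upper device mirrors the lower device's operstate. Names hypothetical.
 */
static void example_lower_state_changed(struct net_device *lower,
					struct net_device *upper)
{
	netif_stacked_transfer_operstate(lower, upper);
}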
6108
Tom Herbertbf264142010-11-26 08:36:09 +00006109#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006110static int netif_alloc_rx_queues(struct net_device *dev)
6111{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006112 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00006113 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006114
Tom Herbertbd25fa72010-10-18 18:00:16 +00006115 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006116
Tom Herbertbd25fa72010-10-18 18:00:16 +00006117 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
6118 if (!rx) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006119 pr_err("netdev: Unable to allocate %u rx queues\n", count);
Tom Herbertbd25fa72010-10-18 18:00:16 +00006120 return -ENOMEM;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006121 }
Tom Herbertbd25fa72010-10-18 18:00:16 +00006122 dev->_rx = rx;
6123
Tom Herbertbd25fa72010-10-18 18:00:16 +00006124 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00006125 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006126 return 0;
6127}
Tom Herbertbf264142010-11-26 08:36:09 +00006128#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006129
Changli Gaoaa942102010-12-04 02:31:41 +00006130static void netdev_init_one_queue(struct net_device *dev,
6131 struct netdev_queue *queue, void *_unused)
6132{
6133 /* Initialize queue lock */
6134 spin_lock_init(&queue->_xmit_lock);
6135 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6136 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00006137 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00006138 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00006139#ifdef CONFIG_BQL
6140 dql_init(&queue->dql, HZ);
6141#endif
Changli Gaoaa942102010-12-04 02:31:41 +00006142}
6143
Tom Herberte6484932010-10-18 18:04:39 +00006144static int netif_alloc_netdev_queues(struct net_device *dev)
6145{
6146 unsigned int count = dev->num_tx_queues;
6147 struct netdev_queue *tx;
6148
6149 BUG_ON(count < 1);
6150
6151 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
6152 if (!tx) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006153 pr_err("netdev: Unable to allocate %u tx queues\n", count);
Tom Herberte6484932010-10-18 18:04:39 +00006154 return -ENOMEM;
6155 }
6156 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00006157
Tom Herberte6484932010-10-18 18:04:39 +00006158 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6159 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00006160
6161 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00006162}
6163
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006164/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006165 * register_netdevice - register a network device
6166 * @dev: device to register
6167 *
6168 * Take a completed network device structure and add it to the kernel
6169 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6170 * chain. 0 is returned on success. A negative errno code is returned
6171 * on a failure to set up the device, or if the name is a duplicate.
6172 *
6173 * Callers must hold the rtnl semaphore. You may want
6174 * register_netdev() instead of this.
6175 *
6176 * BUGS:
6177 * The locking appears insufficient to guarantee two parallel registers
6178 * will not get the same name.
6179 */
6180
6181int register_netdevice(struct net_device *dev)
6182{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006183 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006184 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006185
6186 BUG_ON(dev_boot_phase);
6187 ASSERT_RTNL();
6188
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006189 might_sleep();
6190
Linus Torvalds1da177e2005-04-16 15:20:36 -07006191 /* When net_device's are persistent, this will be fatal. */
6192 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006193 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006194
David S. Millerf1f28aa2008-07-15 00:08:33 -07006195 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07006196 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006197
Linus Torvalds1da177e2005-04-16 15:20:36 -07006198 dev->iflink = -1;
6199
Gao feng828de4f2012-09-13 20:58:27 +00006200 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00006201 if (ret < 0)
6202 goto out;
6203
Linus Torvalds1da177e2005-04-16 15:20:36 -07006204 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006205 if (dev->netdev_ops->ndo_init) {
6206 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006207 if (ret) {
6208 if (ret > 0)
6209 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08006210 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006211 }
6212 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006213
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00006214 ret = -EBUSY;
6215 if (!dev->ifindex)
6216 dev->ifindex = dev_new_index(net);
6217 else if (__dev_get_by_index(net, dev->ifindex))
6218 goto err_uninit;
6219
Linus Torvalds1da177e2005-04-16 15:20:36 -07006220 if (dev->iflink == -1)
6221 dev->iflink = dev->ifindex;
6222
Michał Mirosław5455c692011-02-15 16:59:17 +00006223 /* Transfer changeable features to wanted_features and enable
6224 * software offloads (GSO and GRO).
6225 */
6226 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00006227 dev->features |= NETIF_F_SOFT_FEATURES;
6228 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006229
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006230 /* Turn on no cache copy if HW is doing checksum */
Michał Mirosław34324dc2011-11-15 15:29:55 +00006231 if (!(dev->flags & IFF_LOOPBACK)) {
6232 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6233 if (dev->features & NETIF_F_ALL_CSUM) {
6234 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
6235 dev->features |= NETIF_F_NOCACHE_COPY;
6236 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006237 }
6238
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006239 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00006240 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006241 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00006242
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00006243 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6244 ret = notifier_to_errno(ret);
6245 if (ret)
6246 goto err_uninit;
6247
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006248 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006249 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006250 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006251 dev->reg_state = NETREG_REGISTERED;
6252
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006253 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00006254
Linus Torvalds1da177e2005-04-16 15:20:36 -07006255 /*
6256 * Default initial state at registry is that the
6257 * device is present.
6258 */
6259
6260 set_bit(__LINK_STATE_PRESENT, &dev->state);
6261
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01006262 linkwatch_init_dev(dev);
6263
Linus Torvalds1da177e2005-04-16 15:20:36 -07006264 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006265 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006266 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006267 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006268
Jiri Pirko948b3372013-01-08 01:38:25 +00006269	/* If the device has a permanent device address, the driver should
6270	 * set dev_addr, and addr_assign_type should be set to
6271	 * NET_ADDR_PERM (the default value).
6272 */
6273 if (dev->addr_assign_type == NET_ADDR_PERM)
6274 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6275
Linus Torvalds1da177e2005-04-16 15:20:36 -07006276 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006277 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07006278 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006279 if (ret) {
6280 rollback_registered(dev);
6281 dev->reg_state = NETREG_UNREGISTERED;
6282 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006283 /*
6284 * Prevent userspace races by waiting until the network
6285	 * device is fully set up before sending notifications.
6286 */
Patrick McHardya2835762010-02-26 06:34:51 +00006287 if (!dev->rtnl_link_ops ||
6288 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6289 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006290
6291out:
6292 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006293
6294err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006295 if (dev->netdev_ops->ndo_uninit)
6296 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006297 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006298}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006299EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006300
6301/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006302 * init_dummy_netdev - init a dummy network device for NAPI
6303 * @dev: device to init
6304 *
6305 * This takes a network device structure and initializes the minimum
6306 * set of fields so it can be used to schedule NAPI polls without
6307 * registering a full-blown interface. This is to be used by drivers
6308 * that need to tie several hardware interfaces to a single NAPI
6309 * poll scheduler due to HW limitations.
6310 */
6311int init_dummy_netdev(struct net_device *dev)
6312{
6313	/* Clear everything. Note we don't initialize spinlocks
6314	 * as they aren't supposed to be taken by any of the
6315	 * NAPI code and this dummy netdev is supposed to be
6316	 * only ever used for NAPI polls.
6317 */
6318 memset(dev, 0, sizeof(struct net_device));
6319
6320 /* make sure we BUG if trying to hit standard
6321 * register/unregister code path
6322 */
6323 dev->reg_state = NETREG_DUMMY;
6324
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006325 /* NAPI wants this */
6326 INIT_LIST_HEAD(&dev->napi_list);
6327
6328 /* a dummy interface is started by default */
6329 set_bit(__LINK_STATE_PRESENT, &dev->state);
6330 set_bit(__LINK_STATE_START, &dev->state);
6331
Eric Dumazet29b44332010-10-11 10:22:12 +00006332	/* Note: We don't allocate pcpu_refcnt for dummy devices,
6333	 * because users of this 'device' don't need to change
6334 * its refcount.
6335 */
6336
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006337 return 0;
6338}
6339EXPORT_SYMBOL_GPL(init_dummy_netdev);
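
/*
 * Illustrative sketch, not part of the original file: a driver with one
 * piece of hardware but several NAPI contexts can hang them all off a dummy
 * netdev embedded in its private state. All names are hypothetical.
 */
struct example_adapter {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	int work = 0;

	/* ... process up to @budget packets, counting them in work ... */
	if (work < budget)
		napi_complete(napi);
	return work;
}

static void example_adapter_init(struct example_adapter *ad)
{
	init_dummy_netdev(&ad->napi_dev);
	netif_napi_add(&ad->napi_dev, &ad->napi, example_poll, 64);
}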
6340
6341
6342/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006343 * register_netdev - register a network device
6344 * @dev: device to register
6345 *
6346 * Take a completed network device structure and add it to the kernel
6347 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6348 * chain. 0 is returned on success. A negative errno code is returned
6349 * on a failure to set up the device, or if the name is a duplicate.
6350 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07006351 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07006352 * and expands the device name if you passed a format string to
6353 * alloc_netdev.
6354 */
6355int register_netdev(struct net_device *dev)
6356{
6357 int err;
6358
6359 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006360 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006361 rtnl_unlock();
6362 return err;
6363}
6364EXPORT_SYMBOL(register_netdev);
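
/*
 * Illustrative sketch, not part of the original file: the minimal
 * driver-side registration sequence built on register_netdev(). It assumes
 * <linux/etherdevice.h> for alloc_etherdev(); everything except the core
 * API calls is hypothetical, and a real driver would fill in the ops.
 */
static const struct net_device_ops example_netdev_ops;

static struct net_device *example_create(void)
{
	struct net_device *dev;

	dev = alloc_etherdev(0);	/* no private data in this sketch */
	if (!dev)
		return NULL;
	dev->netdev_ops = &example_netdev_ops;
	if (register_netdev(dev)) {	/* takes RTNL internally */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}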
6365
Eric Dumazet29b44332010-10-11 10:22:12 +00006366int netdev_refcnt_read(const struct net_device *dev)
6367{
6368 int i, refcnt = 0;
6369
6370 for_each_possible_cpu(i)
6371 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6372 return refcnt;
6373}
6374EXPORT_SYMBOL(netdev_refcnt_read);
6375
Ben Hutchings2c530402012-07-10 10:55:09 +00006376/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006377 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00006378 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006379 *
6380 * This is called when unregistering network devices.
6381 *
6382 * Any protocol or device that holds a reference should register
6383 * for netdevice notification, and clean up and put back the
6384 * reference if they receive an UNREGISTER event.
6385 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006386 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006387 */
6388static void netdev_wait_allrefs(struct net_device *dev)
6389{
6390 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00006391 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006392
Eric Dumazete014deb2009-11-17 05:59:21 +00006393 linkwatch_forget_dev(dev);
6394
Linus Torvalds1da177e2005-04-16 15:20:36 -07006395 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00006396 refcnt = netdev_refcnt_read(dev);
6397
6398 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006399 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006400 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006401
6402 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006403 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006404
Eric Dumazet748e2d92012-08-22 21:50:59 +00006405 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006406 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00006407 rtnl_lock();
6408
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006409 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006410 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6411 &dev->state)) {
6412 /* We must not have linkwatch events
6413 * pending on unregister. If this
6414 * happens, we simply run the queue
6415 * unscheduled, resulting in a noop
6416 * for this device.
6417 */
6418 linkwatch_run_queue();
6419 }
6420
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006421 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006422
6423 rebroadcast_time = jiffies;
6424 }
6425
6426 msleep(250);
6427
Eric Dumazet29b44332010-10-11 10:22:12 +00006428 refcnt = netdev_refcnt_read(dev);
6429
Linus Torvalds1da177e2005-04-16 15:20:36 -07006430 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006431 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6432 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006433 warning_time = jiffies;
6434 }
6435 }
6436}
6437
6438/* The sequence is:
6439 *
6440 * rtnl_lock();
6441 * ...
6442 * register_netdevice(x1);
6443 * register_netdevice(x2);
6444 * ...
6445 * unregister_netdevice(y1);
6446 * unregister_netdevice(y2);
6447 * ...
6448 * rtnl_unlock();
6449 * free_netdev(y1);
6450 * free_netdev(y2);
6451 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006452 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006453 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006454 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006455 * without deadlocking with linkwatch via keventd.
6456 * 2) Since we run with the RTNL semaphore not held, we can sleep
6457 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006458 *
6459 * We must not return until all unregister events added during
6460 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006461 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006462void netdev_run_todo(void)
6463{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006464 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006465
Linus Torvalds1da177e2005-04-16 15:20:36 -07006466 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006467 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07006468
6469 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006470
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006471
6472 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00006473 if (!list_empty(&list))
6474 rcu_barrier();
6475
Linus Torvalds1da177e2005-04-16 15:20:36 -07006476 while (!list_empty(&list)) {
6477 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00006478 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006479 list_del(&dev->todo_list);
6480
Eric Dumazet748e2d92012-08-22 21:50:59 +00006481 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006482 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00006483 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006484
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006485 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006486 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006487 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006488 dump_stack();
6489 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006490 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006491
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006492 dev->reg_state = NETREG_UNREGISTERED;
6493
Changli Gao152102c2010-03-30 20:16:22 +00006494 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07006495
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006496 netdev_wait_allrefs(dev);
6497
6498 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00006499 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00006500 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6501 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07006502 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006503
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006504 if (dev->destructor)
6505 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006506
6507 /* Free network device */
6508 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006509 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006510}
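
/*
 * Illustrative sketch, not part of this file: the register/unregister
 * sequence described in the comment above netdev_run_todo(), as a
 * caller would write it ("example_swap" is an invented name).
 */
static int example_swap(struct net_device *x1, struct net_device *y1)
{
	int err;

	rtnl_lock();
	err = register_netdevice(x1);		/* RTNL must be held */
	if (!err)
		unregister_netdevice(y1);	/* y1 queued on net_todo_list */
	rtnl_unlock();				/* runs netdev_run_todo() */
	if (!err)
		free_netdev(y1);		/* y1 is NETREG_UNREGISTERED now */
	return err;
}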
6511
Ben Hutchings3cfde792010-07-09 09:11:52 +00006512/* Convert net_device_stats to rtnl_link_stats64. They have the same
6513 * fields in the same order, with only the type differing.
6514 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006515void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6516 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00006517{
6518#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006519 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6520 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00006521#else
6522 size_t i, n = sizeof(*stats64) / sizeof(u64);
6523 const unsigned long *src = (const unsigned long *)netdev_stats;
6524 u64 *dst = (u64 *)stats64;
6525
6526 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6527 sizeof(*stats64) / sizeof(u64));
6528 for (i = 0; i < n; i++)
6529 dst[i] = src[i];
6530#endif
6531}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006532EXPORT_SYMBOL(netdev_stats_to_stats64);
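
/*
 * Illustrative sketch, not part of this file: a driver that keeps its
 * counters in dev->stats (struct net_device_stats) can implement
 * ndo_get_stats64 by converting in place ("my_get_stats64" is an
 * invented name).
 */
static struct rtnl_link_stats64 *
my_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
{
	netdev_stats_to_stats64(storage, &dev->stats);
	return storage;
}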
Ben Hutchings3cfde792010-07-09 09:11:52 +00006533
Eric Dumazetd83345a2009-11-16 03:36:51 +00006534/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006535 * dev_get_stats - get network device statistics
6536 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07006537 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006538 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00006539 * Get network statistics from device. Return @storage.
6540 * The device driver may provide its own method by setting
6541 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6542 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006543 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00006544struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6545 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00006546{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006547 const struct net_device_ops *ops = dev->netdev_ops;
6548
Eric Dumazet28172732010-07-07 14:58:56 -07006549 if (ops->ndo_get_stats64) {
6550 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006551 ops->ndo_get_stats64(dev, storage);
6552 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00006553 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006554 } else {
6555 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07006556 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006557 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07006558 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07006559}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006560EXPORT_SYMBOL(dev_get_stats);
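
/*
 * Illustrative sketch, not part of this file: how a reader such as
 * net-sysfs consumes dev_get_stats(); @temp only provides backing
 * storage ("example_rx_packets" is an invented name).
 */
static u64 example_rx_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	return stats->rx_packets;
}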
Rusty Russellc45d2862007-03-28 14:29:08 -07006561
Eric Dumazet24824a02010-10-02 06:11:55 +00006562struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07006563{
Eric Dumazet24824a02010-10-02 06:11:55 +00006564 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07006565
Eric Dumazet24824a02010-10-02 06:11:55 +00006566#ifdef CONFIG_NET_CLS_ACT
6567 if (queue)
6568 return queue;
6569 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6570 if (!queue)
6571 return NULL;
6572 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00006573 queue->qdisc = &noop_qdisc;
6574 queue->qdisc_sleeping = &noop_qdisc;
6575 rcu_assign_pointer(dev->ingress_queue, queue);
6576#endif
6577 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07006578}
6579
Eric Dumazet2c60db02012-09-16 09:17:26 +00006580static const struct ethtool_ops default_ethtool_ops;
6581
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00006582void netdev_set_default_ethtool_ops(struct net_device *dev,
6583 const struct ethtool_ops *ops)
6584{
6585 if (dev->ethtool_ops == &default_ethtool_ops)
6586 dev->ethtool_ops = ops;
6587}
6588EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
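
/*
 * Illustrative sketch, not part of this file: a layered driver can
 * install fallback ethtool ops without clobbering ops the device has
 * already chosen ("my_ethtool_ops" and "my_setup_ethtool" are invented
 * names).
 */
static const struct ethtool_ops my_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
};

static void my_setup_ethtool(struct net_device *dev)
{
	/* only takes effect while dev->ethtool_ops is still the default */
	netdev_set_default_ethtool_ops(dev, &my_ethtool_ops);
}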
6589
Linus Torvalds1da177e2005-04-16 15:20:36 -07006590/**
Tom Herbert36909ea2011-01-09 19:36:31 +00006591 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006592 * @sizeof_priv: size of private data to allocate space for
6593 * @name: device name format string
6594 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00006595 * @txqs: the number of TX subqueues to allocate
6596 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07006597 *
6598 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07006599 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00006600 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006601 */
Tom Herbert36909ea2011-01-09 19:36:31 +00006602struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6603 void (*setup)(struct net_device *),
6604 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006605{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006606 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07006607 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006608 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006609
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07006610 BUG_ON(strlen(name) >= sizeof(dev->name));
6611
Tom Herbert36909ea2011-01-09 19:36:31 +00006612 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006613 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00006614 return NULL;
6615 }
6616
Tom Herbert36909ea2011-01-09 19:36:31 +00006617#ifdef CONFIG_RPS
6618 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006619 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00006620 return NULL;
6621 }
6622#endif
6623
David S. Millerfd2ea0a2008-07-17 01:56:23 -07006624 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006625 if (sizeof_priv) {
6626 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006627 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006628 alloc_size += sizeof_priv;
6629 }
6630 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006631 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006632
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07006633 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006634 if (!p) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006635 pr_err("alloc_netdev: Unable to allocate device\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006636 return NULL;
6637 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006638
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006639 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006640 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006641
Eric Dumazet29b44332010-10-11 10:22:12 +00006642 dev->pcpu_refcnt = alloc_percpu(int);
6643 if (!dev->pcpu_refcnt)
Tom Herberte6484932010-10-18 18:04:39 +00006644 goto free_p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006645
Linus Torvalds1da177e2005-04-16 15:20:36 -07006646 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00006647 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006648
Jiri Pirko22bedad32010-04-01 21:22:57 +00006649 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006650 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00006651
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006652 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006653
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07006654 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00006655 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006656
Herbert Xud565b0a2008-12-15 23:38:52 -08006657 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006658 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00006659 INIT_LIST_HEAD(&dev->link_watch_list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006660 INIT_LIST_HEAD(&dev->upper_dev_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07006661 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006662 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006663
6664 dev->num_tx_queues = txqs;
6665 dev->real_num_tx_queues = txqs;
6666 if (netif_alloc_netdev_queues(dev))
6667 goto free_all;
6668
6669#ifdef CONFIG_RPS
6670 dev->num_rx_queues = rxqs;
6671 dev->real_num_rx_queues = rxqs;
6672 if (netif_alloc_rx_queues(dev))
6673 goto free_all;
6674#endif
6675
Linus Torvalds1da177e2005-04-16 15:20:36 -07006676 strcpy(dev->name, name);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006677 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00006678 if (!dev->ethtool_ops)
6679 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006680 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006681
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006682free_all:
6683 free_netdev(dev);
6684 return NULL;
6685
Eric Dumazet29b44332010-10-11 10:22:12 +00006686free_pcpu:
6687 free_percpu(dev->pcpu_refcnt);
Tom Herberted9af2e2010-11-09 10:47:30 +00006688 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00006689#ifdef CONFIG_RPS
6690 kfree(dev->_rx);
6691#endif
6692
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006693free_p:
6694 kfree(p);
6695 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006696}
Tom Herbert36909ea2011-01-09 19:36:31 +00006697EXPORT_SYMBOL(alloc_netdev_mqs);
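
/*
 * Illustrative sketch, not part of this file: allocating a device with
 * private state and four TX/RX queue pairs ("struct my_priv",
 * "my_setup" and "my_alloc" are invented names; the "%d" lets the core
 * pick a free unit number later).
 */
struct my_priv {
	spinlock_t lock;
};

static void my_setup(struct net_device *dev)
{
	ether_setup(dev);		/* sane Ethernet defaults */
}

static struct net_device *my_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct my_priv), "myeth%d",
				my_setup, 4, 4);
}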
Linus Torvalds1da177e2005-04-16 15:20:36 -07006698
6699/**
6700 * free_netdev - free network device
6701 * @dev: device
6702 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006703 * This function does the last stage of destroying an allocated device
6704 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006705 * If this is the last reference then it will be freed.
6706 */
6707void free_netdev(struct net_device *dev)
6708{
Herbert Xud565b0a2008-12-15 23:38:52 -08006709 struct napi_struct *p, *n;
6710
Denis V. Lunevf3005d72008-04-16 02:02:18 -07006711 release_net(dev_net(dev));
6712
David S. Millere8a04642008-07-17 00:34:19 -07006713 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00006714#ifdef CONFIG_RPS
6715 kfree(dev->_rx);
6716#endif
David S. Millere8a04642008-07-17 00:34:19 -07006717
Eric Dumazet33d480c2011-08-11 19:30:52 +00006718 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00006719
Jiri Pirkof001fde2009-05-05 02:48:28 +00006720 /* Flush device addresses */
6721 dev_addr_flush(dev);
6722
Herbert Xud565b0a2008-12-15 23:38:52 -08006723 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6724 netif_napi_del(p);
6725
Eric Dumazet29b44332010-10-11 10:22:12 +00006726 free_percpu(dev->pcpu_refcnt);
6727 dev->pcpu_refcnt = NULL;
6728
Stephen Hemminger3041a062006-05-26 13:25:24 -07006729 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006730 if (dev->reg_state == NETREG_UNINITIALIZED) {
6731 kfree((char *)dev - dev->padded);
6732 return;
6733 }
6734
6735 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6736 dev->reg_state = NETREG_RELEASED;
6737
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07006738 /* will free via device release */
6739 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006740}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006741EXPORT_SYMBOL(free_netdev);
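
/*
 * Illustrative sketch, not part of this file: the canonical probe error
 * path free_netdev() serves - a device that never reached
 * NETREG_REGISTERED is simply freed ("my_probe" is an invented name).
 */
static int my_probe(void)
{
	struct net_device *dev = alloc_etherdev(0);
	int err;

	if (!dev)
		return -ENOMEM;
	err = register_netdev(dev);
	if (err)
		free_netdev(dev);	/* reg_state is NETREG_UNINITIALIZED */
	return err;
}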
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006742
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006743/**
6744 * synchronize_net - Synchronize with packet receive processing
6745 *
6746 * Wait for packets currently being received to be done.
6747 * Does not block later packets from starting.
6748 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006749void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006750{
6751 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00006752 if (rtnl_is_locked())
6753 synchronize_rcu_expedited();
6754 else
6755 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006756}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006757EXPORT_SYMBOL(synchronize_net);
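
/*
 * Illustrative sketch, not part of this file: the retract-then-free
 * pattern synchronize_net() supports. It assumes the caller holds RTNL;
 * "struct my_hook" and "my_retract" are invented names.
 */
struct my_hook {
	void *priv;
};

static void my_retract(struct my_hook __rcu **slot)
{
	struct my_hook *old = rtnl_dereference(*slot);

	RCU_INIT_POINTER(*slot, NULL);
	synchronize_net();		/* wait out packet-path readers */
	kfree(old);			/* no reader can still see it */
}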
Linus Torvalds1da177e2005-04-16 15:20:36 -07006758
6759/**
Eric Dumazet44a08732009-10-27 07:03:04 +00006760 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07006761 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00006762 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08006763 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07006764 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006765 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00006766 * If head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006767 *
6768 * Callers must hold the rtnl semaphore. You may want
6769 * unregister_netdev() instead of this.
6770 */
6771
Eric Dumazet44a08732009-10-27 07:03:04 +00006772void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006773{
Herbert Xua6620712007-12-12 19:21:56 -08006774 ASSERT_RTNL();
6775
Eric Dumazet44a08732009-10-27 07:03:04 +00006776 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006777 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00006778 } else {
6779 rollback_registered(dev);
6780 /* Finish processing unregister after unlock */
6781 net_set_todo(dev);
6782 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006783}
Eric Dumazet44a08732009-10-27 07:03:04 +00006784EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006785
6786/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006787 * unregister_netdevice_many - unregister many devices
6788 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006789 */
6790void unregister_netdevice_many(struct list_head *head)
6791{
6792 struct net_device *dev;
6793
6794 if (!list_empty(head)) {
6795 rollback_registered_many(head);
6796 list_for_each_entry(dev, head, unreg_list)
6797 net_set_todo(dev);
6798 }
6799}
Eric Dumazet63c80992009-10-27 07:06:49 +00006800EXPORT_SYMBOL(unregister_netdevice_many);
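
/*
 * Illustrative sketch, not part of this file: batching several
 * unregisters so the RCU grace periods in rollback_registered_many()
 * are shared across the whole list ("my_destroy_all" is an invented
 * name).
 */
static void my_destroy_all(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}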
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006801
6802/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006803 * unregister_netdev - remove device from the kernel
6804 * @dev: device
6805 *
6806 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006807 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006808 *
6809 * This is just a wrapper for unregister_netdevice that takes
6810 * the rtnl semaphore. In general you want to use this and not
6811 * unregister_netdevice.
6812 */
6813void unregister_netdev(struct net_device *dev)
6814{
6815 rtnl_lock();
6816 unregister_netdevice(dev);
6817 rtnl_unlock();
6818}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006819EXPORT_SYMBOL(unregister_netdev);
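
/*
 * Illustrative sketch, not part of this file: the usual module teardown
 * pairing ("my_remove" is an invented name).
 */
static void my_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes RTNL itself, waits for refs */
	free_netdev(dev);		/* release the device structure */
}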
6820
Eric W. Biedermance286d32007-09-12 13:53:49 +02006821/**
6822 * dev_change_net_namespace - move device to different nethost namespace
6823 * @dev: device
6824 * @net: network namespace
6825 * @pat: If not NULL name pattern to try if the current device name
6826 * is already taken in the destination network namespace.
6827 *
6828 * This function shuts down a device interface and moves it
6829 * to a new network namespace. On success 0 is returned, on
6830 * failure a negative errno code is returned.
6831 *
6832 * Callers must hold the rtnl semaphore.
6833 */
6834
6835int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6836{
Eric W. Biedermance286d32007-09-12 13:53:49 +02006837 int err;
6838
6839 ASSERT_RTNL();
6840
6841 /* Don't allow namespace local devices to be moved. */
6842 err = -EINVAL;
6843 if (dev->features & NETIF_F_NETNS_LOCAL)
6844 goto out;
6845
6846 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02006847 if (dev->reg_state != NETREG_REGISTERED)
6848 goto out;
6849
6850 /* Get out if there is nothing to do */
6851 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09006852 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02006853 goto out;
6854
6855 /* Pick the destination device name, and ensure
6856 * we can use it in the destination network namespace.
6857 */
6858 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00006859 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006860 /* We get here if we can't use the current device name */
6861 if (!pat)
6862 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00006863 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006864 goto out;
6865 }
6866
6867 /*
6868 * And now a mini version of register_netdevice and unregister_netdevice.
6869 */
6870
6871 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07006872 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006873
6874 /* And unlink it from device chain */
6875 err = -ENODEV;
6876 unlist_netdevice(dev);
6877
6878 synchronize_net();
6879
6880 /* Shutdown queueing discipline. */
6881 dev_shutdown(dev);
6882
6883 /* Notify protocols that we are about to destroy
6884 this device. They should clean up all their state.
David Lamparter3b27e102010-09-17 03:22:19 +00006885
6886 Note that dev->reg_state stays at NETREG_REGISTERED.
6887 This is wanted because this way 8021q and macvlan know
6888 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02006889 */
6890 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00006891 rcu_barrier();
6892 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric W. Biedermand2237d32011-10-21 06:24:20 +00006893 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006894
6895 /*
6896 * Flush the unicast and multicast chains
6897 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006898 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006899 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006900
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006901 /* Send a netdev-removed uevent to the old namespace */
6902 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6903
Eric W. Biedermance286d32007-09-12 13:53:49 +02006904 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006905 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006906
Eric W. Biedermance286d32007-09-12 13:53:49 +02006907 /* If there is an ifindex conflict assign a new one */
6908 if (__dev_get_by_index(net, dev->ifindex)) {
6909 int iflink = (dev->iflink == dev->ifindex);
6910 dev->ifindex = dev_new_index(net);
6911 if (iflink)
6912 dev->iflink = dev->ifindex;
6913 }
6914
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006915 /* Send a netdev-add uevent to the new namespace */
6916 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6917
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006918 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07006919 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006920 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006921
6922 /* Add the device back in the hashes */
6923 list_netdevice(dev);
6924
6925 /* Notify protocols that a new device appeared. */
6926 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6927
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006928 /*
6929 * Prevent userspace races by waiting until the network
6930 * device is fully setup before sending notifications.
6931 */
6932 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6933
Eric W. Biedermance286d32007-09-12 13:53:49 +02006934 synchronize_net();
6935 err = 0;
6936out:
6937 return err;
6938}
Johannes Berg463d0182009-07-14 00:33:35 +02006939EXPORT_SYMBOL_GPL(dev_change_net_namespace);
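
/*
 * Illustrative sketch, not part of this file: moving a device into
 * another namespace, falling back to an "eth%d" pattern if its current
 * name is already taken there ("my_move" is an invented name).
 */
static int my_move(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();
	return err;
}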
Eric W. Biedermance286d32007-09-12 13:53:49 +02006940
Linus Torvalds1da177e2005-04-16 15:20:36 -07006941static int dev_cpu_callback(struct notifier_block *nfb,
6942 unsigned long action,
6943 void *ocpu)
6944{
6945 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006946 struct sk_buff *skb;
6947 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6948 struct softnet_data *sd, *oldsd;
6949
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006950 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006951 return NOTIFY_OK;
6952
6953 local_irq_disable();
6954 cpu = smp_processor_id();
6955 sd = &per_cpu(softnet_data, cpu);
6956 oldsd = &per_cpu(softnet_data, oldcpu);
6957
6958 /* Find end of our completion_queue. */
6959 list_skb = &sd->completion_queue;
6960 while (*list_skb)
6961 list_skb = &(*list_skb)->next;
6962 /* Append completion queue from offline CPU. */
6963 *list_skb = oldsd->completion_queue;
6964 oldsd->completion_queue = NULL;
6965
Linus Torvalds1da177e2005-04-16 15:20:36 -07006966 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00006967 if (oldsd->output_queue) {
6968 *sd->output_queue_tailp = oldsd->output_queue;
6969 sd->output_queue_tailp = oldsd->output_queue_tailp;
6970 oldsd->output_queue = NULL;
6971 oldsd->output_queue_tailp = &oldsd->output_queue;
6972 }
Heiko Carstens264524d2011-06-06 20:50:03 +00006973 /* Append NAPI poll list from offline CPU. */
6974 if (!list_empty(&oldsd->poll_list)) {
6975 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6976 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6977 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006978
6979 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6980 local_irq_enable();
6981
6982 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00006983 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6984 netif_rx(skb);
6985 input_queue_head_incr(oldsd);
6986 }
Tom Herbertfec5e652010-04-16 16:01:27 -07006987 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006988 netif_rx(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00006989 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07006990 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006991
6992 return NOTIFY_OK;
6993}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006994
6995
Herbert Xu7f353bf2007-08-10 15:47:58 -07006996/**
Herbert Xub63365a2008-10-23 01:11:29 -07006997 * netdev_increment_features - increment feature set by one
6998 * @all: current feature set
6999 * @one: new feature set
7000 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07007001 *
7002 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07007003 * @one to the master device with current feature set @all. Will not
7004 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07007005 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007006netdev_features_t netdev_increment_features(netdev_features_t all,
7007 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07007008{
Michał Mirosław1742f182011-04-22 06:31:16 +00007009 if (mask & NETIF_F_GEN_CSUM)
7010 mask |= NETIF_F_ALL_CSUM;
7011 mask |= NETIF_F_VLAN_CHALLENGED;
7012
7013 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
7014 all &= one | ~NETIF_F_ALL_FOR_ALL;
7015
Michał Mirosław1742f182011-04-22 06:31:16 +00007016 /* If one device supports hw checksumming, set for all. */
7017 if (all & NETIF_F_GEN_CSUM)
7018 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07007019
7020 return all;
7021}
Herbert Xub63365a2008-10-23 01:11:29 -07007022EXPORT_SYMBOL(netdev_increment_features);
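
/*
 * Illustrative sketch, not part of this file: how an aggregating driver
 * folds slave feature sets together, in the spirit of
 * bond_compute_features() ("my_compute_features" is an invented name
 * and the mask is an arbitrary example).
 */
static netdev_features_t my_compute_features(struct net_device *slaves[],
					     int n)
{
	netdev_features_t mask = NETIF_F_ALL_CSUM | NETIF_F_SG;
	netdev_features_t features = mask;
	int i;

	for (i = 0; i < n; i++)
		features = netdev_increment_features(features,
						     slaves[i]->features,
						     mask);
	return features;
}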
Herbert Xu7f353bf2007-08-10 15:47:58 -07007023
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007024static struct hlist_head *netdev_create_hash(void)
7025{
7026 int i;
7027 struct hlist_head *hash;
7028
7029 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7030 if (hash != NULL)
7031 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7032 INIT_HLIST_HEAD(&hash[i]);
7033
7034 return hash;
7035}
7036
Eric W. Biederman881d9662007-09-17 11:56:21 -07007037/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07007038static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007039{
Rustad, Mark D734b6542012-07-18 09:06:07 +00007040 if (net != &init_net)
7041 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07007042
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007043 net->dev_name_head = netdev_create_hash();
7044 if (net->dev_name_head == NULL)
7045 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007046
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007047 net->dev_index_head = netdev_create_hash();
7048 if (net->dev_index_head == NULL)
7049 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007050
7051 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07007052
7053err_idx:
7054 kfree(net->dev_name_head);
7055err_name:
7056 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07007057}
7058
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007059/**
7060 * netdev_drivername - network driver for the device
7061 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07007062 *
7063 * Determine network driver for device.
7064 */
David S. Miller3019de12011-06-06 16:41:33 -07007065const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07007066{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07007067 const struct device_driver *driver;
7068 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07007069 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07007070
7071 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007072 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07007073 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007074
7075 driver = parent->driver;
7076 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07007077 return driver->name;
7078 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007079}
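
/*
 * Illustrative sketch, not part of this file: the typical consumer of
 * netdev_drivername() is a diagnostic printout, e.g. a watchdog
 * ("my_report" is an invented name).
 */
static void my_report(const struct net_device *dev)
{
	pr_info("%s: driven by %s\n", dev->name, netdev_drivername(dev));
}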
7080
Joe Perchesb004ff42012-09-12 20:12:19 -07007081static int __netdev_printk(const char *level, const struct net_device *dev,
Joe Perches256df2f2010-06-27 01:02:35 +00007082 struct va_format *vaf)
7083{
7084 int r;
7085
Joe Perchesb004ff42012-09-12 20:12:19 -07007086 if (dev && dev->dev.parent) {
Joe Perches666f3552012-09-12 20:14:11 -07007087 r = dev_printk_emit(level[1] - '0',
7088 dev->dev.parent,
7089 "%s %s %s: %pV",
7090 dev_driver_string(dev->dev.parent),
7091 dev_name(dev->dev.parent),
7092 netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007093 } else if (dev) {
Joe Perches256df2f2010-06-27 01:02:35 +00007094 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007095 } else {
Joe Perches256df2f2010-06-27 01:02:35 +00007096 r = printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007097 }
Joe Perches256df2f2010-06-27 01:02:35 +00007098
7099 return r;
7100}
7101
7102int netdev_printk(const char *level, const struct net_device *dev,
7103 const char *format, ...)
7104{
7105 struct va_format vaf;
7106 va_list args;
7107 int r;
7108
7109 va_start(args, format);
7110
7111 vaf.fmt = format;
7112 vaf.va = &args;
7113
7114 r = __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007115
Joe Perches256df2f2010-06-27 01:02:35 +00007116 va_end(args);
7117
7118 return r;
7119}
7120EXPORT_SYMBOL(netdev_printk);
7121
7122#define define_netdev_printk_level(func, level) \
7123int func(const struct net_device *dev, const char *fmt, ...) \
7124{ \
7125 int r; \
7126 struct va_format vaf; \
7127 va_list args; \
7128 \
7129 va_start(args, fmt); \
7130 \
7131 vaf.fmt = fmt; \
7132 vaf.va = &args; \
7133 \
7134 r = __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07007135 \
Joe Perches256df2f2010-06-27 01:02:35 +00007136 va_end(args); \
7137 \
7138 return r; \
7139} \
7140EXPORT_SYMBOL(func);
7141
7142define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7143define_netdev_printk_level(netdev_alert, KERN_ALERT);
7144define_netdev_printk_level(netdev_crit, KERN_CRIT);
7145define_netdev_printk_level(netdev_err, KERN_ERR);
7146define_netdev_printk_level(netdev_warn, KERN_WARNING);
7147define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7148define_netdev_printk_level(netdev_info, KERN_INFO);
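
/*
 * Illustrative sketch, not part of this file: the per-level helpers
 * generated above are used like printk(), but automatically prefix the
 * driver, bus and interface names ("my_log_examples" is an invented
 * name).
 */
static void my_log_examples(struct net_device *dev)
{
	netdev_info(dev, "MTU is %u\n", dev->mtu);
	netdev_err(dev, "DMA error, resetting\n");
}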
7149
Pavel Emelyanov46650792007-10-08 20:38:39 -07007150static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007151{
7152 kfree(net->dev_name_head);
7153 kfree(net->dev_index_head);
7154}
7155
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007156static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07007157 .init = netdev_init,
7158 .exit = netdev_exit,
7159};
7160
Pavel Emelyanov46650792007-10-08 20:38:39 -07007161static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007162{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007163 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02007164 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007165 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02007166 * initial network namespace
7167 */
7168 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007169 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007170 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007171 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02007172
7173 /* Ignore unmovable devices (e.g. loopback) */
7174 if (dev->features & NETIF_F_NETNS_LOCAL)
7175 continue;
7176
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007177 /* Leave virtual devices for the generic cleanup */
7178 if (dev->rtnl_link_ops)
7179 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08007180
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007181 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007182 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7183 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007184 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007185 pr_emerg("%s: failed to move %s to init_net: %d\n",
7186 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007187 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02007188 }
7189 }
7190 rtnl_unlock();
7191}
7192
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007193static void __net_exit default_device_exit_batch(struct list_head *net_list)
7194{
7195 /* At exit all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04007196 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007197 * Do this across as many network namespaces as possible to
7198 * improve batching efficiency.
7199 */
7200 struct net_device *dev;
7201 struct net *net;
7202 LIST_HEAD(dev_kill_list);
7203
7204 rtnl_lock();
7205 list_for_each_entry(net, net_list, exit_list) {
7206 for_each_netdev_reverse(net, dev) {
7207 if (dev->rtnl_link_ops)
7208 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7209 else
7210 unregister_netdevice_queue(dev, &dev_kill_list);
7211 }
7212 }
7213 unregister_netdevice_many(&dev_kill_list);
Eric Dumazetceaaec92011-02-17 22:59:19 +00007214 list_del(&dev_kill_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007215 rtnl_unlock();
7216}
7217
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007218static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007219 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007220 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02007221};
7222
Linus Torvalds1da177e2005-04-16 15:20:36 -07007223/*
7224 * Initialize the DEV module. At boot time this walks the device list and
7225 * unhooks any devices that fail to initialise (normally hardware not
7226 * present) and leaves us with a valid list of present and active devices.
7227 *
7228 */
7229
7230/*
7231 * This is called single threaded during boot, so no need
7232 * to take the rtnl semaphore.
7233 */
7234static int __init net_dev_init(void)
7235{
7236 int i, rc = -ENOMEM;
7237
7238 BUG_ON(!dev_boot_phase);
7239
Linus Torvalds1da177e2005-04-16 15:20:36 -07007240 if (dev_proc_init())
7241 goto out;
7242
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007243 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07007244 goto out;
7245
7246 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08007247 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007248 INIT_LIST_HEAD(&ptype_base[i]);
7249
Vlad Yasevich62532da2012-11-15 08:49:10 +00007250 INIT_LIST_HEAD(&offload_base);
7251
Eric W. Biederman881d9662007-09-17 11:56:21 -07007252 if (register_pernet_subsys(&netdev_net_ops))
7253 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007254
7255 /*
7256 * Initialise the packet receive queues.
7257 */
7258
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07007259 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007260 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007261
Changli Gaodee42872010-05-02 05:42:16 +00007262 memset(sd, 0, sizeof(*sd));
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007263 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07007264 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007265 sd->completion_queue = NULL;
7266 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00007267 sd->output_queue = NULL;
7268 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00007269#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007270 sd->csd.func = rps_trigger_softirq;
7271 sd->csd.info = sd;
7272 sd->csd.flags = 0;
7273 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07007274#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00007275
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007276 sd->backlog.poll = process_backlog;
7277 sd->backlog.weight = weight_p;
7278 sd->backlog.gro_list = NULL;
7279 sd->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007280 }
7281
Linus Torvalds1da177e2005-04-16 15:20:36 -07007282 dev_boot_phase = 0;
7283
Eric W. Biederman505d4f72008-11-07 22:54:20 -08007284 /* The loopback device is special: if any other network device
7285 * is present in a network namespace, the loopback device must
7286 * be present. Since we now dynamically allocate and free the
7287 * loopback device, ensure this invariant is maintained by
7288 * keeping the loopback device as the first device on the
7289 * list of network devices, ensuring that the loopback device
7290 * is the first device that appears and the last network device
7291 * that disappears.
7292 */
7293 if (register_pernet_device(&loopback_net_ops))
7294 goto out;
7295
7296 if (register_pernet_device(&default_device_ops))
7297 goto out;
7298
Carlos R. Mafra962cf362008-05-15 11:15:37 -03007299 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7300 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007301
7302 hotcpu_notifier(dev_cpu_callback, 0);
7303 dst_init();
7304 dev_mcast_init();
7305 rc = 0;
7306out:
7307 return rc;
7308}
7309
7310subsys_initcall(net_dev_init);
7311
Krishna Kumare88721f2009-02-18 17:55:02 -08007312static int __init initialize_hashrnd(void)
7313{
Tom Herbert0a9627f2010-03-16 08:03:29 +00007314 get_random_bytes(&hashrnd, sizeof(hashrnd));
Krishna Kumare88721f2009-02-18 17:55:02 -08007315 return 0;
7316}
7317
7318late_initcall_sync(initialize_hashrnd);
7319