/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/static_key.h>
#include <net/flow_keys.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
        while (++net->dev_base_seq == 0)
                ;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(net);

        return 0;
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        hlist_del_rcu(&dev->name_hlist);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
         ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
         ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
         "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
         "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and checking protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	That is true today; do not change it.
 *	Explanation: if a mangling protocol handler were first on the
 *	list, it could not detect that the packet is cloned and must be
 *	copied-on-write, so it would modify the packet in place and
 *	subsequent readers would see a corrupted packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
        if (pt->type == htons(ETH_P_ALL))
                return &ptype_all;
        else
                return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

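/*
 * Worked example (illustrative only, not used by the code): an ARP handler
 * has pt->type == htons(ETH_P_ARP), i.e. protocol value 0x0806.  ptype_head()
 * masks the host-order value with PTYPE_HASH_MASK, so the entry lands in
 * bucket 0x0806 & 15 == 6 of ptype_base.  ETH_P_ALL taps bypass the hash
 * entirely and all share the single ptype_all list.
 */
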
/**
 * dev_add_pack - add packet handler
 * @pt: packet type declaration
 *
 * Add a protocol handler to the networking stack. The passed &packet_type
 * is linked into kernel lists and may not be freed until it has been
 * removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that
 * CPUs that are in the middle of receiving packets will see the new
 * packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);

        spin_lock(&ptype_lock);
        list_add_rcu(&pt->list, head);
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 * __dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;

        spin_lock(&ptype_lock);

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_pack: %p not found\n", pt);
out:
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 * dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


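/*
 * Usage sketch (illustrative only; my_proto_rcv and ETH_P_MYPROTO are
 * hypothetical names, not part of this file): a module registers a protocol
 * handler with dev_add_pack() and tears it down with dev_remove_pack(),
 * which waits out in-flight receivers before the struct may be freed.
 *
 *	static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
 *				struct packet_type *pt,
 *				struct net_device *orig_dev)
 *	{
 *		// the skb is ours now: consume it or drop it
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pt __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_MYPROTO),	// hypothetical value
 *		.func = my_proto_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);		// from module init
 *	dev_remove_pack(&my_pt);	// from module exit; sleeps
 */
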
/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &proto_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that
 * CPUs that are in the middle of receiving packets will see the new
 * offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;

        spin_lock(&offload_lock);
        list_add_rcu(&po->list, head);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &offload_type
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &offload_type is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

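/*
 * Usage sketch (illustrative only): GRO/GSO handlers for a protocol are
 * registered through a &packet_offload keyed by EtherType.  The callback
 * names below are hypothetical; real users such as net/ipv4/af_inet.c fill
 * in the gso_segment/gro_receive/gro_complete callbacks for their type.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gro_receive = my_gro_receive,		// hypothetical
 *			.gro_complete = my_gro_complete,	// hypothetical
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 */
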
/*******************************************************************************

                Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 * netdev_boot_setup_add - add new setup entry
 * @name: name of the device
 * @map: configured settings for the device
 *
 * Adds new setup entry to the dev_boot_setup list.  The function
 * returns 0 on error and 1 on success.  This is a generic routine
 * for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 * netdev_boot_setup_check - check boot time settings
 * @dev: the netdevice
 *
 * Check boot time settings for the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq = s[i].map.irq;
                        dev->base_addr = s[i].map.base_addr;
                        dev->mem_start = s[i].map.mem_start;
                        dev->mem_end = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 * netdev_boot_base - get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of a device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

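/*
 * Worked example (hypothetical values): booting with
 *
 *	netdev=5,0x300,0xd0000,0xd4000,eth0
 *
 * makes get_options() fill ints[] = {4, 5, 0x300, 0xd0000, 0xd4000} and
 * return str pointing at "eth0", so the saved entry is named "eth0" with
 * map.irq = 5, map.base_addr = 0x300, map.mem_start = 0xd0000 and
 * map.mem_end = 0xd4000.  Supplying fewer integers simply leaves the
 * later fields zero.
 */
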
/*******************************************************************************

                Device Interface Subroutines

*******************************************************************************/

/**
 * __dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. Must be called under RTNL semaphore
 * or @dev_base_lock. If the name is found a pointer to the device
 * is returned. If the name is not found then %NULL is returned. The
 * reference counters are not incremented so the caller must be
 * careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry(dev, p, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry_rcu(dev, p, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 * dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. This can be called from any
 * context and does its own locking. The returned handle has
 * the usage count incremented and the caller must use dev_put() to
 * release it when it is no longer needed. %NULL is returned if no
 * matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

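/*
 * Usage sketch (illustrative only; "eth0" and the calling context are
 * hypothetical): a caller that cannot hold RCU or the RTNL takes a true
 * reference and must balance it with dev_put().
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		// ... use dev; the held reference keeps it alive ...
 *		dev_put(dev);
 *	}
 */
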
/**
 * __dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns %NULL if the device
 * is not found or a pointer to the device. The device has not
 * had its reference counter increased so the caller must be careful
 * about locking. The caller must hold either the RTNL semaphore
 * or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, p, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 * dev_get_by_index_rcu - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns %NULL if the device
 * is not found or a pointer to the device. The device has not
 * had its reference counter increased so the caller must be careful
 * about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, p, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 * dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns NULL if the device
 * is not found or a pointer to the device. The device returned has
 * had a reference added and the pointer is safe until the user calls
 * dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 * dev_getbyhwaddr_rcu - find a device by its hardware address
 * @net: the applicable net namespace
 * @type: media type of device
 * @ha: hardware address
 *
 * Search for an interface by MAC address. Returns NULL if the device
 * is not found or a pointer to the device.
 * The caller must hold RCU or RTNL.
 * The returned device has not had its ref count increased
 * and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
                                       const char *ha)
{
        struct net_device *dev;

        for_each_netdev_rcu(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

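/*
 * Usage sketch (illustrative only; the MAC value is hypothetical): since no
 * reference is taken, the caller pins the result with rcu_read_lock() for
 * as long as it dereferences the device.
 *
 *	static const char mac[ETH_ALEN] = {0x00, 0x16, 0x3e, 0x00, 0x00, 0x01};
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		netdev_info(dev, "found by hw address\n");
 *	rcu_read_unlock();
 */
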
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev, *ret = NULL;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                if (dev->type == type) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 * dev_get_by_flags_rcu - find any device with given flags
 * @net: the applicable net namespace
 * @if_flags: IFF_* values
 * @mask: bitmask of bits in if_flags to check
 *
 * Search for any interface with the given flags. Returns NULL if a device
 * is not found or a pointer to the device. Must be called inside
 * rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
                                        unsigned short mask)
{
        struct net_device *dev, *ret;

        ret = NULL;
        for_each_netdev_rcu(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        ret = dev;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 * dev_valid_name - check if name is okay for network device
 * @name: name string
 *
 * Network device names need to be valid file names to
 * allow sysfs to work.  We also disallow any kind of
 * whitespace.
 */
bool dev_valid_name(const char *name)
{
        if (*name == '\0')
                return false;
        if (strlen(name) >= IFNAMSIZ)
                return false;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return false;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return false;
                name++;
        }
        return true;
}
EXPORT_SYMBOL(dev_valid_name);

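/*
 * Worked examples (illustrative): dev_valid_name("eth0") and
 * dev_valid_name("veth%d") are true; dev_valid_name(""),
 * dev_valid_name("."), dev_valid_name(".."), dev_valid_name("a/b"),
 * dev_valid_name("a b") and any name of IFNAMSIZ (16) or more characters
 * are false.
 */
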
/**
 * __dev_alloc_name - allocate a name for a device
 * @net: network namespace to allocate the device name in
 * @name: name format string
 * @buf:  scratch buffer and result name string
 *
 * Passed a format string - eg "lt%d" - it will try and find a suitable
 * id. It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        if (buf != name)
                snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

/**
 * dev_alloc_name - allocate a name for a device
 * @dev: device
 * @name: name format string
 *
 * Passed a format string - eg "lt%d" - it will try and find a suitable
 * id. It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

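/*
 * Usage sketch (illustrative only; error handling shortened): a caller
 * holding the rtnl lock can hand in a "%d" template and gets back the
 * first free unit number, with dev->name filled in on success.
 *
 *	rtnl_lock();
 *	err = dev_alloc_name(dev, "eth%d");	// e.g. picks "eth2" when
 *						// eth0 and eth1 already exist
 *	if (err < 0)
 *		goto out;
 *	// dev->name now holds the chosen name; err is the unit number
 */
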
static int dev_alloc_name_ns(struct net *net,
                             struct net_device *dev,
                             const char *name)
{
        char buf[IFNAMSIZ];
        int ret;

        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}

static int dev_get_valid_name(struct net *net,
                              struct net_device *dev,
                              const char *name)
{
        BUG_ON(!net);

        if (!dev_valid_name(name))
                return -EINVAL;

        if (strchr(name, '%'))
                return dev_alloc_name_ns(net, dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (dev->name != name)
                strlcpy(dev->name, name, IFNAMSIZ);

        return 0;
}

/**
 * dev_change_name - change name of a device
 * @dev: device
 * @newname: name (or format string) must be at least IFNAMSIZ
 *
 * Change the name of a device; a format string such as "eth%d" can
 * be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        write_seqcount_begin(&devnet_rename_seq);

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
                write_seqcount_end(&devnet_rename_seq);
                return 0;
        }

        memcpy(oldname, dev->name, IFNAMSIZ);

        err = dev_get_valid_name(net, dev, newname);
        if (err < 0) {
                write_seqcount_end(&devnet_rename_seq);
                return err;
        }

rollback:
        ret = device_rename(&dev->dev, dev->name);
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
                write_seqcount_end(&devnet_rename_seq);
                return ret;
        }

        write_seqcount_end(&devnet_rename_seq);

        write_lock_bh(&dev_base_lock);
        hlist_del_rcu(&dev->name_hlist);
        write_unlock_bh(&dev_base_lock);

        synchronize_rcu();

        write_lock_bh(&dev_base_lock);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
                        write_seqcount_begin(&devnet_rename_seq);
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
                } else {
                        pr_err("%s: name change rollback failed: %d\n",
                               dev->name, ret);
                }
        }

        return err;
}

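/*
 * Usage sketch (illustrative only; the names are hypothetical): renaming is
 * an RTNL-protected operation and fails with -EBUSY while the device is up.
 *
 *	rtnl_lock();
 *	dev_close(dev);			// must be down to rename
 *	err = dev_change_name(dev, "wan0");
 *	if (!err)
 *		err = dev_open(dev);
 *	rtnl_unlock();
 */
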
/**
 * dev_set_alias - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from info
 *
 * Set ifalias for a device.  Returns the number of bytes stored, or a
 * negative errno code.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        char *new_ifalias;

        ASSERT_RTNL();

        if (len >= IFALIASZ)
                return -EINVAL;

        if (!len) {
                kfree(dev->ifalias);
                dev->ifalias = NULL;
                return 0;
        }

        new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
        if (!new_ifalias)
                return -ENOMEM;
        dev->ifalias = new_ifalias;

        strlcpy(dev->ifalias, alias, len+1);
        return len;
}


/**
 * netdev_features_change - device changes features
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 * netdev_state_change - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                call_netdevice_notifiers(NETDEV_CHANGE, dev);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
        }
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
        rtnl_lock();
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

Linus Torvalds1da177e2005-04-16 15:20:36 -07001230/**
1231 * dev_load - load a network module
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001232 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 * @name: name of interface
1234 *
1235 * If a network interface is not present and the process has suitable
1236 * privileges this function loads the module. If module loading is not
1237 * available in this kernel then it becomes a nop.
1238 */
1239
Eric W. Biederman881d9662007-09-17 11:56:21 -07001240void dev_load(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241{
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001242 struct net_device *dev;
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001243 int no_module;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244
Eric Dumazet72c95282009-10-30 07:11:27 +00001245 rcu_read_lock();
1246 dev = dev_get_by_name_rcu(net, name);
1247 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001249 no_module = !dev;
1250 if (no_module && capable(CAP_NET_ADMIN))
1251 no_module = request_module("netdev-%s", name);
1252 if (no_module && capable(CAP_SYS_MODULE)) {
1253 if (!request_module("%s", name))
Vinson Lee7cecb522012-06-27 14:32:07 +00001254 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1255 name);
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001256 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001258EXPORT_SYMBOL(dev_load);
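
/*
 * Usage note (illustrative): a driver can opt into the preferred
 * CAP_NET_ADMIN path above by declaring a "netdev-" module alias; the
 * interface name "mydev0" is hypothetical.
 *
 *	MODULE_ALIAS("netdev-mydev0");
 *
 * An ioctl on a then-missing "mydev0" reaches this function, which
 * issues request_module("netdev-mydev0") and loads the driver.
 */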

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
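
/*
 * Usage sketch (illustrative): bringing an interface up from kernel
 * code.  dev_open() must run under the RTNL; the name "eth0" is just
 * an example, and dev_close() below is the symmetric teardown.
 *
 *	struct net_device *dev;
 *	int err = -ENODEV;
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_name(&init_net, "eth0");
 *	if (dev)
 *		err = dev_open(dev);
 *	rtnl_unlock();
 */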

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch the poll
		 * list; it can even be on a different CPU. So just clear
		 * netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device-specific close. This cannot fail, and is
		 * only done if the device is UP.
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device. Must be
 * called under RTNL. This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable LRO on a vlan device,
	 * use the underlying physical device instead.
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);
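
/*
 * Usage note (illustrative): paths that start forwarding out of a
 * device are expected to call this under RTNL, e.g.
 *
 *	ASSERT_RTNL();
 *	dev_disable_lro(dev);
 *
 * since frames merged by LRO must not be forwarded to another
 * interface.
 */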


static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier to allow the device to have a race-free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
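
/*
 * Usage sketch (illustrative): a minimal notifier.  In this kernel the
 * notifier's third argument is the struct net_device pointer itself;
 * names prefixed "my_" are hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&my_nb);
 *
 * Because of the replay described above, my_netdev_event() also sees
 * NETDEV_REGISTER/NETDEV_UP for devices that already existed.
 */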

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks.  Parameters and return value
 * are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
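
/*
 * Usage note (illustrative): core code fires events through this
 * helper after changing device state, e.g. the MTU-change path does,
 * roughly,
 *
 *	dev->mtu = new_mtu;
 *	call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
 *
 * so that registered subsystems can react to the new MTU.
 */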

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	WARN_ON(in_interrupt());
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}

static int net_hwtstamp_validate(struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	enum hwtstamp_tx_types tx_type;
	enum hwtstamp_rx_filters rx_filter;
	int tx_type_valid = 0;
	int rx_filter_valid = 0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	tx_type = cfg.tx_type;
	rx_filter = cfg.rx_filter;

	switch (tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		tx_type_valid = 1;
		break;
	}

	switch (rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		rx_filter_valid = 1;
		break;
	}

	if (!tx_type_valid || !rx_filter_valid)
		return -ERANGE;

	return 0;
}

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented first
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->skb_iif = 0;
	skb->dev = dev;
	skb_dst_drop(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
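
/*
 * Usage sketch (illustrative): a veth-style pair device could loop a
 * transmitted skb into its peer from ndo_start_xmit; "my_priv" and its
 * "peer" member are hypothetical.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb,
 *				   struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		if (dev_forward_skb(priv->peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */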

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly set by the sender, so
			 * that the second statement is just protection
			 * against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, if
 * TC0 is invalid nothing can be done, so disable priority mappings.
 * It is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			/* don't return with xps_map_mutex still held */
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
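
/*
 * Usage sketch (illustrative): a driver could pin each transmit queue
 * to one CPU at setup time.  Error handling is simplified and the
 * round-robin policy is just an example.
 *
 *	cpumask_var_t mask;
 *	int i;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	for (i = 0; i < dev->real_num_tx_queues; i++) {
 *		cpumask_clear(mask);
 *		cpumask_set_cpu(i % num_online_cpus(), mask);
 *		netif_set_xps_queue(dev, mask, i);
 *	}
 *	free_cpumask_var(mask);
 */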

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
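
/*
 * Usage sketch (illustrative): a multiqueue driver resizing its active
 * TX queue set, e.g. from an ethtool channel-count handler, where
 * "new_txq" is a hypothetical requested count.
 *
 *	err = netif_set_real_num_tx_queues(dev, new_txq);
 *	if (err)
 *		return err;
 */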

#ifdef CONFIG_RPS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device.  Returns 0 on success, or a
 * negative error code.  If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
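
/*
 * Usage note (illustrative): drivers typically clamp their queue count
 * with this helper; "hw_max_queues" is a hypothetical hardware limit.
 *
 *	nqueues = min_t(int, hw_max_queues,
 *			netif_get_num_default_rss_queues());
 */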

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
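
/*
 * Usage note (illustrative): dev_kfree_skb_any() is the right call on
 * paths that may run in either process or hard-IRQ context, such as a
 * TX completion handler; my_tx_complete() is hypothetical.
 *
 *	static void my_tx_complete(struct net_device *dev,
 *				   struct sk_buff *skb)
 *	{
 *		dev->stats.tx_packets++;
 *		dev->stats.tx_bytes += skb->len;
 *		dev_kfree_skb_any(skb);
 *	}
 */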


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from the system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
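
/*
 * Usage sketch (illustrative): the classic suspend/resume pairing in a
 * PCI network driver; the "my_" names are hypothetical.
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *ndev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(ndev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *ndev = pci_get_drvdata(pdev);
 *
 *		netif_device_attach(ndev);
 *		return 0;
 *	}
 */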

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
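
/*
 * Usage sketch (illustrative): a driver whose hardware cannot checksum
 * a particular frame can fall back to software before handing the
 * packet to DMA; my_hw_can_csum() is a hypothetical capability test.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !my_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */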

/**
 * skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation.  This is
 * only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;
	int err;

	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return ERR_PTR(-EINVAL);

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		skb_warn_bad_offload(skb);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->callbacks.gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
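
/*
 * Usage sketch (illustrative): a software GSO fallback along the lines
 * of dev_gso_segment() below - segment, then walk the resulting list;
 * my_xmit_one() is a hypothetical per-frame transmit.
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	while (segs) {
 *		struct sk_buff *next = segs->next;
 *
 *		segs->next = NULL;
 *		my_xmit_one(segs);
 *		segs = next;
 *	}
 *
 * A NULL return means no segmentation was needed and the original skb
 * can be sent as-is.
 */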

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. An IOMMU is present and allows us to map all the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));

			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 * dev_gso_segment - Perform emulated hardware segmentation on skb.
 * @skb: buffer to segment
 * @features: device features as applicable to this skb
 *
 * This function segments the given skb and stores the list of segments
 * in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static netdev_features_t harmonize_features(struct sk_buff *skb,
	__be16 protocol, netdev_features_t features)
{
	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, protocol)) {
		features &= ~NETIF_F_ALL_CSUM;
		features &= ~NETIF_F_SG;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	netdev_features_t features = skb->dev->features;

	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
		features &= ~NETIF_F_GSO_MASK;

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, protocol, features);
	}

	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);

	if (protocol != htons(ETH_P_8021Q)) {
		return harmonize_features(skb, protocol, features);
	} else {
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
			    NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
		return harmonize_features(skb, protocol, features);
	}
}
EXPORT_SYMBOL(netif_skb_features);

/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      int features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) &&
		 !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags &&
		 !(features & NETIF_F_SG)));
}
2524
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;
	unsigned int skb_len;

	if (likely(!skb->next)) {
		netdev_features_t features;

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		features = netif_skb_features(skb);

		if (vlan_tx_tag_present(skb) &&
		    !(features & NETIF_F_HW_VLAN_TX)) {
			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}

		/* If this is an encapsulation offload request, verify we are
		 * testing hardware encapsulation features instead of standard
		 * features for the netdev
		 */
		if (skb->encapsulation)
			features &= dev->hw_enc_features;

		if (netif_needs_gso(skb, features)) {
			if (unlikely(dev_gso_segment(skb, features)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, features) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (skb->encapsulation)
					skb_set_inner_transport_header(skb,
						skb_checksum_start_offset(skb));
				else
					skb_set_transport_header(skb,
						skb_checksum_start_offset(skb));
				if (!(features & NETIF_F_ALL_CSUM) &&
				    skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		skb_len = skb->len;
		rc = ops->ndo_start_xmit(skb, dev);
		trace_net_dev_xmit(skb, rc, dev, skb_len);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If device doesn't need nskb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(nskb, dev);

		skb_len = nskb->len;
		rc = ops->ndo_start_xmit(nskb, dev);
		trace_net_dev_xmit(nskb, rc, dev, skb_len);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_xmit_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}

static u32 hashrnd __read_mostly;

/*
 * Returns a Tx hash based on the given packet descriptor and a number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);

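/*
 * Worked example (illustrative): the multiply-shift on the last line
 * maps a 32-bit hash uniformly onto [0, qcount) without a modulo.
 * With qcount == 4, hash 0x40000000 gives ((u64)0x40000000 * 4) >> 32
 * == 1, and hash 0xffffffff gives ((u64)0xffffffff * 4) >> 32 == 3;
 * qoffset then shifts the result into the traffic class's queue range.
 */
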
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}
	return queue_index;
}

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;
				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk) {
			struct dst_entry *dst =
				    rcu_dereference_check(sk->sk_dst_cache, 1);

			if (dst && skb_dst(skb) == dst)
				sk_tx_queue_set(sk, queue_index);
		}

		queue_index = new_index;
	}

	return queue_index;
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb);
		else
			queue_index = __netdev_pick_tx(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len = skb_transport_offset(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);
		qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len;
	}
}

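/*
 * Worked example (illustrative): for a TCP GSO skb carrying 3 segments
 * with 54 bytes of ethernet+IP+TCP headers and 1400 bytes of payload
 * each, skb->len is 54 + 3 * 1400 = 4254, and the adjustment above
 * yields pkt_len = 4254 + (3 - 1) * 54 = 4362 = 3 * 1454, i.e. the
 * bytes that will actually appear on the wire (ignoring framing
 * overhead).
 */
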
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_pkt_len_init(skb);
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

/**
 * dev_loopback_xmit - loop back @skb
 * @skb: buffer to transmit
 */
int dev_loopback_xmit(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);

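/*
 * Hypothetical usage sketch (not from the original source): loop a
 * copy of an outgoing broadcast back to local listeners while the
 * original continues down the stack. The skb must carry a dst entry,
 * as the WARN_ON() above insists.
 */
static void example_loop_back_copy(struct sk_buff *skb)
{
	struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);

	if (copy)
		dev_loopback_xmit(copy);
}
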
/**
 * dev_queue_xmit - transmit a buffer
 * @skb: buffer to transmit
 *
 * Queue a buffer for transmission to a network device. The caller must
 * have set the device and priority and built the buffer before calling
 * this function. The function can be called from an interrupt.
 *
 * A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 * I notice this method can also return errors from the queue disciplines,
 * including NET_XMIT_DROP, which is a positive value. So, errors can also
 * be positive.
 *
 * Regardless of the return value, the skb is consumed, so it is currently
 * difficult to retry a send to this method. (You can bump the ref count
 * before sending to hold a reference for retry if you are careful.)
 *
 * When calling this method, interrupts MUST be enabled. This is because
 * the BH enable code must have IRQs enabled so that it will not deadlock.
 *     --BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	txq = netdev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here. (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shoot the lock. It is not prone to deadlocks.
	   Either shoot the noqueue qdisc; it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				rc = dev_hard_start_xmit(skb, dev, txq);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);


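/*
 * A minimal, hypothetical sketch of the calling convention documented
 * above (the helper name and frame layout are invented for
 * illustration): the caller sets skb->dev, builds the frame, and then
 * must not touch the skb again, whatever dev_queue_xmit() returns.
 */
static int example_xmit_raw(struct net_device *dev, const void *frame,
			    unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	memcpy(skb_put(skb, len), frame, len);	/* complete L2 frame */
	skb_reset_mac_header(skb);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);	/* example value */
	return dev_queue_xmit(skb);	/* consumes skb; may return > 0 */
}
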
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;	/* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_rxhash = 1;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}

	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb->rxhash & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		if (map->len == 1 &&
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	if (!skb_get_rxhash(skb))
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */

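/*
 * Hypothetical sketch of the periodic scan the comment above asks
 * drivers to perform. The filter bookkeeping (struct example_filter
 * and its fields) is invented for illustration; real drivers keep
 * equivalent state for each filter they installed via
 * ndo_rx_flow_steer().
 */
struct example_filter {
	u32 flow_id;
	u16 filter_id;
	bool in_use;
};

static void example_expire_filters(struct net_device *dev, u16 rxq_index,
				   struct example_filter *filters, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!filters[i].in_use)
			continue;
		if (rps_may_expire_flow(dev, rxq_index, filters[i].flow_id,
					filters[i].filter_id)) {
			/* remove the hardware filter here, then: */
			filters[i].in_use = false;
		}
	}
}
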
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check whether this softnet_data structure belongs to another CPU.
 * If so, queue it on our IPI list and return 1; otherwise return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

/**
 * netif_rx - post buffer to the network code
 * @skb: buffer to post
 *
 * This function receives a packet from a device driver and queues it for
 * the upper (protocol) levels to process. It always succeeds. The buffer
 * may be dropped during processing for congestion control or by the
 * protocol layers.
 *
 * return values:
 * NET_RX_SUCCESS (no congestion)
 * NET_RX_DROP (packet was dropped)
 */
int netif_rx(struct sk_buff *skb)
{
	int ret;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
EXPORT_SYMBOL(netif_rx);

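/*
 * Hypothetical driver RX sketch (names invented for illustration):
 * copy a received frame into a fresh skb and post it via netif_rx(),
 * typically from the device's interrupt handler.
 */
static void example_rx_frame(struct net_device *dev, const void *data,
			     unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	netif_rx(skb);	/* always consumes the skb */
}
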
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
 * instructions (a compare and 2 extra stores) if we don't have it on
 * but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
				     skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device. This handler will then be
 * called from __netif_receive_skb. A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 *
 * For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);

/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

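/*
 * Hypothetical usage sketch (handler and helper names invented for
 * illustration): attach a handler that counts every frame a device
 * receives and lets normal delivery continue. This is the same hook
 * that bridge ports and bonding slaves use to claim their traffic.
 */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	atomic_long_t *counter = rcu_dereference(skb->dev->rx_handler_data);

	atomic_long_inc(counter);
	return RX_HANDLER_PASS;		/* continue normal processing */
}

static int example_attach_counter(struct net_device *dev,
				  atomic_long_t *counter)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_rx_handler, counter);
	rtnl_unlock();
	return err;
}
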
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __constant_htons(ETH_P_ARP):
	case __constant_htons(ETH_P_IP):
	case __constant_htons(ETH_P_IPV6):
	case __constant_htons(ETH_P_8021Q):
		return true;
	default:
		return false;
	}
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *null_or_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;
	unsigned long pflags = current->flags;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	/*
	 * PFMEMALLOC skbs are special, they should
	 * - be delivered to SOCK_MEMALLOC sockets only
	 * - stay away from userspace
	 * - have bounded memory usage
	 *
	 * Use PF_MEMALLOC as this saves us from propagating the allocation
	 * context down to all allocation sites.
	 */
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		current->flags |= PF_MEMALLOC;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		goto out;

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto unlock;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

skip_taps:
#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto unlock;
ncls:
#endif

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)
	    && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto unlock;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			goto unlock;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (vlan_tx_nonzero_tag_present(skb))
		skb->pkt_type = PACKET_OTHERHOST;

	/* deliver only exact match when indicated */
	null_or_dev = deliver_exact ? skb->dev : NULL;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

unlock:
	rcu_read_unlock();
out:
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
	return ret;
}

/**
 * netif_receive_skb - process receive buffer from network
 * @skb: buffer to process
 *
 * netif_receive_skb() is the main receive data processing function.
 * It always succeeds. The buffer may be dropped during processing
 * for congestion control or by the protocol layers.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}
EXPORT_SYMBOL(netif_receive_skb);

/* Network device is going away, flush any packets still pending.
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}

Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003833/* napi->gro_list contains packets ordered by age,
3834 * with the youngest packets at the head.
3835 * Complete skbs in reverse order to reduce latencies.
3836 */
3837void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08003838{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003839 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003840
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003841 /* scan list and build reverse chain */
3842 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3843 skb->prev = prev;
3844 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08003845 }
3846
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003847 for (skb = prev; skb; skb = prev) {
3848 skb->next = NULL;
3849
3850 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3851 return;
3852
3853 prev = skb->prev;
3854 napi_gro_complete(skb);
3855 napi->gro_count--;
3856 }
3857
Herbert Xud565b0a2008-12-15 23:38:52 -08003858 napi->gro_list = NULL;
3859}
Eric Dumazet86cac582010-08-31 18:25:32 +00003860EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08003861
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003862static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3863{
3864 struct sk_buff *p;
3865 unsigned int maclen = skb->dev->hard_header_len;
3866
3867 for (p = napi->gro_list; p; p = p->next) {
3868 unsigned long diffs;
3869
3870 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3871 diffs |= p->vlan_tci ^ skb->vlan_tci;
3872 if (maclen == ETH_HLEN)
3873 diffs |= compare_ether_header(skb_mac_header(p),
3874 skb_gro_mac_header(skb));
3875 else if (!diffs)
3876 diffs = memcmp(skb_mac_header(p),
3877 skb_gro_mac_header(skb),
3878 maclen);
3879 NAPI_GRO_CB(p)->same_flow = !diffs;
3880 NAPI_GRO_CB(p)->flush = 0;
3881 }
3882}
3883
Rami Rosenbb728822012-11-28 21:55:25 +00003884static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08003885{
3886 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003887 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003888 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003889 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003890 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08003891 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003892 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003893
Jarek Poplawskice9e76c2010-08-05 01:19:11 +00003894 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
Herbert Xud565b0a2008-12-15 23:38:52 -08003895 goto normal;
3896
David S. Miller21dc3302010-08-23 00:13:46 -07003897 if (skb_is_gso(skb) || skb_has_frag_list(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08003898 goto normal;
3899
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003900 gro_list_prepare(napi, skb);
3901
Herbert Xud565b0a2008-12-15 23:38:52 -08003902 rcu_read_lock();
3903 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003904 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08003905 continue;
3906
Herbert Xu86911732009-01-29 14:19:50 +00003907 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08003908 mac_len = skb->network_header - skb->mac_header;
3909 skb->mac_len = mac_len;
3910 NAPI_GRO_CB(skb)->same_flow = 0;
3911 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08003912 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003913
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003914 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003915 break;
3916 }
3917 rcu_read_unlock();
3918
3919 if (&ptype->list == head)
3920 goto normal;
3921
Herbert Xu0da2afd52008-12-26 14:57:42 -08003922 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003923 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003924
Herbert Xud565b0a2008-12-15 23:38:52 -08003925 if (pp) {
3926 struct sk_buff *nskb = *pp;
3927
3928 *pp = nskb->next;
3929 nskb->next = NULL;
3930 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00003931 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08003932 }
3933
Herbert Xu0da2afd52008-12-26 14:57:42 -08003934 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08003935 goto ok;
3936
Herbert Xu4ae55442009-02-08 18:00:36 +00003937 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08003938 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08003939
Herbert Xu4ae55442009-02-08 18:00:36 +00003940 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08003941 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003942 NAPI_GRO_CB(skb)->age = jiffies;
Herbert Xu86911732009-01-29 14:19:50 +00003943 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003944 skb->next = napi->gro_list;
3945 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003946 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08003947
Herbert Xuad0f9902009-02-01 01:24:55 -08003948pull:
Herbert Xucb189782009-05-26 18:50:31 +00003949 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3950 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3951
3952 BUG_ON(skb->end - skb->tail < grow);
3953
3954 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3955
3956 skb->tail += grow;
3957 skb->data_len -= grow;
3958
3959 skb_shinfo(skb)->frags[0].page_offset += grow;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003960 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
Herbert Xucb189782009-05-26 18:50:31 +00003961
Eric Dumazet9e903e02011-10-18 21:00:24 +00003962 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
Ian Campbellea2ab692011-08-22 23:44:58 +00003963 skb_frag_unref(skb, 0);
Herbert Xucb189782009-05-26 18:50:31 +00003964 memmove(skb_shinfo(skb)->frags,
3965 skb_shinfo(skb)->frags + 1,
Jarek Poplawskie5093ae2010-08-11 02:02:10 +00003966 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
Herbert Xucb189782009-05-26 18:50:31 +00003967 }
Herbert Xuad0f9902009-02-01 01:24:55 -08003968 }
3969
Herbert Xud565b0a2008-12-15 23:38:52 -08003970ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003971 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003972
3973normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08003974 ret = GRO_NORMAL;
3975 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08003976}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003977
Herbert Xu96e93ea2009-01-06 10:49:34 -08003978
Rami Rosenbb728822012-11-28 21:55:25 +00003979static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003980{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003981 switch (ret) {
3982 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003983 if (netif_receive_skb(skb))
3984 ret = GRO_DROP;
3985 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003986
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003987 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08003988 kfree_skb(skb);
3989 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003990
Eric Dumazetdaa86542012-04-19 07:07:40 +00003991 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00003992 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3993 kmem_cache_free(skbuff_head_cache, skb);
3994 else
3995 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00003996 break;
3997
Ben Hutchings5b252f02009-10-29 07:17:09 +00003998 case GRO_HELD:
3999 case GRO_MERGED:
4000 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004001 }
4002
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004003 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004004}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004005
Eric Dumazetca07e432012-10-06 22:28:06 +00004006static void skb_gro_reset_offset(struct sk_buff *skb)
Herbert Xu78a478d2009-05-26 18:50:21 +00004007{
Eric Dumazetca07e432012-10-06 22:28:06 +00004008 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4009 const skb_frag_t *frag0 = &pinfo->frags[0];
4010
Herbert Xu78a478d2009-05-26 18:50:21 +00004011 NAPI_GRO_CB(skb)->data_offset = 0;
4012 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00004013 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00004014
Herbert Xu78d3fd02009-05-26 18:50:23 +00004015 if (skb->mac_header == skb->tail &&
Eric Dumazetca07e432012-10-06 22:28:06 +00004016 pinfo->nr_frags &&
4017 !PageHighMem(skb_frag_page(frag0))) {
4018 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4019 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xu74895942009-05-26 18:50:27 +00004020 }
Herbert Xu78a478d2009-05-26 18:50:21 +00004021}
Herbert Xu78a478d2009-05-26 18:50:21 +00004022
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004023gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004024{
Herbert Xu86911732009-01-29 14:19:50 +00004025 skb_gro_reset_offset(skb);
4026
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004027 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004028}
4029EXPORT_SYMBOL(napi_gro_receive);
4030
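/* Illustrative sketch, not part of dev.c: the canonical NAPI poll loop a
 * driver might use with napi_gro_receive(). All example_* names and the
 * struct below are hypothetical; napi_gro_receive(), napi_complete() and
 * the weight/budget contract are the real interfaces shown. When fewer
 * packets than the budget are processed, the driver completes NAPI
 * (which also flushes gro_list) and re-enables its rx interrupt.
 */
#if 0
struct example_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = example_dequeue_rx(priv);

		if (!skb)
			break;
		napi_gro_receive(napi, skb);	/* GRO may aggregate the flow */
		work++;
	}

	if (work < budget) {
		napi_complete(napi);		/* flushes gro_list for us */
		example_enable_rx_irq(priv);
	}
	return work;
}
#endif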
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00004031static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004032{
Herbert Xu96e93ea2009-01-06 10:49:34 -08004033 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00004034 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4035 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00004036 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08004037 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08004038 skb->skb_iif = 0;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004039
4040 napi->skb = skb;
4041}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004042
Herbert Xu76620aa2009-04-16 02:02:07 -07004043struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08004044{
Herbert Xu5d38a072009-01-04 16:13:40 -08004045 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004046
4047 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00004048 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
4049 if (skb)
4050 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004051 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004052 return skb;
4053}
Herbert Xu76620aa2009-04-16 02:02:07 -07004054EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004055
Rami Rosenbb728822012-11-28 21:55:25 +00004056static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004057 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004058{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004059 switch (ret) {
4060 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00004061 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00004062 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00004063
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004064 if (ret == GRO_HELD)
4065 skb_gro_pull(skb, -ETH_HLEN);
4066 else if (netif_receive_skb(skb))
4067 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00004068 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004069
4070 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004071 case GRO_MERGED_FREE:
4072 napi_reuse_skb(napi, skb);
4073 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004074
4075 case GRO_MERGED:
4076 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004077 }
4078
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004079 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004080}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004081
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004082static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004083{
Herbert Xu76620aa2009-04-16 02:02:07 -07004084 struct sk_buff *skb = napi->skb;
4085 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00004086 unsigned int hlen;
4087 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07004088
4089 napi->skb = NULL;
4090
4091 skb_reset_mac_header(skb);
4092 skb_gro_reset_offset(skb);
4093
Herbert Xua5b1cf22009-05-26 18:50:28 +00004094 off = skb_gro_offset(skb);
4095 hlen = off + sizeof(*eth);
4096 eth = skb_gro_header_fast(skb, off);
4097 if (skb_gro_header_hard(skb, hlen)) {
4098 eth = skb_gro_header_slow(skb, hlen, off);
4099 if (unlikely(!eth)) {
4100 napi_reuse_skb(napi, skb);
4101 skb = NULL;
4102 goto out;
4103 }
Herbert Xu76620aa2009-04-16 02:02:07 -07004104 }
4105
4106 skb_gro_pull(skb, sizeof(*eth));
4107
4108 /*
4109 * This works because the only protocols we care about don't require
4110 * special handling. We'll fix it up properly at the end.
4111 */
4112 skb->protocol = eth->h_proto;
4113
4114out:
4115 return skb;
4116}
Herbert Xu76620aa2009-04-16 02:02:07 -07004117
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004118gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004119{
4120 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004121
4122 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004123 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004124
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004125 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004126}
4127EXPORT_SYMBOL(napi_gro_frags);
4128
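/* Illustrative sketch, not part of dev.c: how a page-based driver might
 * feed GRO without building a linear skb. example_* identifiers are
 * hypothetical; napi_get_frags(), skb_fill_page_desc() and
 * napi_gro_frags() are the real calls. napi_gro_frags() consumes
 * napi->skb, so the driver must not touch the skb afterwards.
 */
#if 0
static void example_rx_page(struct napi_struct *napi, struct page *page,
			    unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb)) {
		put_page(page);		/* drop, keep the page refcount sane */
		return;
	}

	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi);	/* parses the Ethernet header itself */
}
#endif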
Eric Dumazete326bed2010-04-22 00:22:45 -07004129/*
4130 * net_rps_action sends any pending IPIs for RPS.
4131 * Note: called with local irq disabled, but exits with local irq enabled.
4132 */
4133static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4134{
4135#ifdef CONFIG_RPS
4136 struct softnet_data *remsd = sd->rps_ipi_list;
4137
4138 if (remsd) {
4139 sd->rps_ipi_list = NULL;
4140
4141 local_irq_enable();
4142
4143 /* Send pending IPIs to kick RPS processing on remote cpus. */
4144 while (remsd) {
4145 struct softnet_data *next = remsd->rps_ipi_next;
4146
4147 if (cpu_online(remsd->cpu))
4148 __smp_call_function_single(remsd->cpu,
4149 &remsd->csd, 0);
4150 remsd = next;
4151 }
4152 } else
4153#endif
4154 local_irq_enable();
4155}
4156
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004157static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004158{
4159 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004160 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004161
Eric Dumazete326bed2010-04-22 00:22:45 -07004162#ifdef CONFIG_RPS
4163 /* Check if we have pending IPIs; it's better to send them now
4164 * rather than waiting for net_rx_action() to end.
4165 */
4166 if (sd->rps_ipi_list) {
4167 local_irq_disable();
4168 net_rps_action_and_irq_enable(sd);
4169 }
4170#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004171 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004172 local_irq_disable();
4173 while (work < quota) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004174 struct sk_buff *skb;
Changli Gao6e7676c2010-04-27 15:07:33 -07004175 unsigned int qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176
Changli Gao6e7676c2010-04-27 15:07:33 -07004177 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004178 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004179 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004180 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004181 input_queue_head_incr(sd);
4182 if (++work >= quota) {
4183 local_irq_enable();
4184 return work;
4185 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004186 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004187
Changli Gao6e7676c2010-04-27 15:07:33 -07004188 rps_lock(sd);
4189 qlen = skb_queue_len(&sd->input_pkt_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004190 if (qlen)
Changli Gao6e7676c2010-04-27 15:07:33 -07004191 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4192 &sd->process_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004193
Changli Gao6e7676c2010-04-27 15:07:33 -07004194 if (qlen < quota - work) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004195 /*
4196 * Inline a custom version of __napi_complete().
4197 * Only the current cpu owns and manipulates this napi,
4198 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
4199 * so we can use a plain write instead of clear_bit()
4200 * and we don't need an smp_mb() memory barrier.
4201 */
4202 list_del(&napi->poll_list);
4203 napi->state = 0;
4204
Changli Gao6e7676c2010-04-27 15:07:33 -07004205 quota = work + qlen;
4206 }
4207 rps_unlock(sd);
4208 }
4209 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004210
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004211 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212}
4213
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004214/**
4215 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004216 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004217 *
4218 * The entry's receive function will be scheduled to run
4219 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004220void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004221{
4222 unsigned long flags;
4223
4224 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004225 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004226 local_irq_restore(flags);
4227}
4228EXPORT_SYMBOL(__napi_schedule);
4229
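/* Illustrative sketch, not part of dev.c: the usual interrupt-handler
 * pairing for __napi_schedule(). example_* names are hypothetical;
 * napi_schedule_prep() + __napi_schedule() is the real idiom, and the
 * prep call guarantees the instance is queued at most once.
 */
#if 0
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		example_disable_rx_irq(priv);	/* poll mode until complete */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
#endif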
Herbert Xud565b0a2008-12-15 23:38:52 -08004230void __napi_complete(struct napi_struct *n)
4231{
4232 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4233 BUG_ON(n->gro_list);
4234
4235 list_del(&n->poll_list);
4236 smp_mb__before_clear_bit();
4237 clear_bit(NAPI_STATE_SCHED, &n->state);
4238}
4239EXPORT_SYMBOL(__napi_complete);
4240
4241void napi_complete(struct napi_struct *n)
4242{
4243 unsigned long flags;
4244
4245 /*
4246 * don't let napi dequeue from the cpu poll list
4247 * just in case it's running on a different cpu.
4248 */
4249 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4250 return;
4251
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004252 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004253 local_irq_save(flags);
4254 __napi_complete(n);
4255 local_irq_restore(flags);
4256}
4257EXPORT_SYMBOL(napi_complete);
4258
4259void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4260 int (*poll)(struct napi_struct *, int), int weight)
4261{
4262 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004263 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004264 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004265 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004266 napi->poll = poll;
4267 napi->weight = weight;
4268 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004269 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004270#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004271 spin_lock_init(&napi->poll_lock);
4272 napi->poll_owner = -1;
4273#endif
4274 set_bit(NAPI_STATE_SCHED, &napi->state);
4275}
4276EXPORT_SYMBOL(netif_napi_add);
4277
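/* Illustrative sketch, not part of dev.c: registering and unregistering
 * the NAPI context from a hypothetical driver's setup and teardown paths.
 * example_* names are invented; netif_napi_add()/netif_napi_del() and the
 * conventional weight of 64 are the real interface.
 */
#if 0
static int example_setup(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	netif_napi_add(dev, &priv->napi, example_poll, 64);
	return 0;
}

static void example_teardown(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	netif_napi_del(&priv->napi);	/* frees any held GRO packets */
}
#endif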
4278void netif_napi_del(struct napi_struct *napi)
4279{
4280 struct sk_buff *skb, *next;
4281
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004282 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004283 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004284
4285 for (skb = napi->gro_list; skb; skb = next) {
4286 next = skb->next;
4287 skb->next = NULL;
4288 kfree_skb(skb);
4289 }
4290
4291 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004292 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004293}
4294EXPORT_SYMBOL(netif_napi_del);
4295
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296static void net_rx_action(struct softirq_action *h)
4297{
Eric Dumazete326bed2010-04-22 00:22:45 -07004298 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004299 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004300 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004301 void *have;
4302
Linus Torvalds1da177e2005-04-16 15:20:36 -07004303 local_irq_disable();
4304
Eric Dumazete326bed2010-04-22 00:22:45 -07004305 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004306 struct napi_struct *n;
4307 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004309 /* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004310 * Allow this to run for 2 jiffies, which allows
4311 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004312 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004313 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314 goto softnet_break;
4315
4316 local_irq_enable();
4317
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004318 /* Even though interrupts have been re-enabled, this
4319 * access is safe because interrupts can only add new
4320 * entries to the tail of this list, and only ->poll()
4321 * calls can remove this head entry from the list.
4322 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004323 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004325 have = netpoll_poll_lock(n);
4326
4327 weight = n->weight;
4328
David S. Miller0a7606c2007-10-29 21:28:47 -07004329 /* This NAPI_STATE_SCHED test is for avoiding a race
4330 * with netpoll's poll_napi(). Only the entity which
4331 * obtains the lock and sees NAPI_STATE_SCHED set will
4332 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004333 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004334 */
4335 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004336 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004337 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004338 trace_napi_poll(n);
4339 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004340
4341 WARN_ON_ONCE(work > weight);
4342
4343 budget -= work;
4344
4345 local_irq_disable();
4346
4347 /* Drivers must not modify the NAPI state if they
4348 * consume the entire weight. In such cases this code
4349 * still "owns" the NAPI instance and therefore can
4350 * move the instance around on the list at-will.
4351 */
David S. Millerfed17f32008-01-07 21:00:40 -08004352 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004353 if (unlikely(napi_disable_pending(n))) {
4354 local_irq_enable();
4355 napi_complete(n);
4356 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004357 } else {
4358 if (n->gro_list) {
4359 /* Flush packets that are too old.
4360 * If HZ < 1000, flush all packets.
4361 */
4362 local_irq_enable();
4363 napi_gro_flush(n, HZ >= 1000);
4364 local_irq_disable();
4365 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004366 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004367 }
David S. Millerfed17f32008-01-07 21:00:40 -08004368 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004369
4370 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371 }
4372out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004373 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004374
Chris Leechdb217332006-06-17 21:24:58 -07004375#ifdef CONFIG_NET_DMA
4376 /*
4377 * There may not be any more sk_buffs coming right now, so push
4378 * any pending DMA copies to hardware
4379 */
Dan Williams2ba05622009-01-06 11:38:14 -07004380 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004381#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004382
Linus Torvalds1da177e2005-04-16 15:20:36 -07004383 return;
4384
4385softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004386 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004387 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4388 goto out;
4389}
4390
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004391static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004392
4393/**
4394 * register_gifconf - register a SIOCGIFCONF handler
4395 * @family: Address family
4396 * @gifconf: Function handler
4397 *
4398 * Register protocol dependent address dumping routines. The handler
4399 * that is passed must not be freed or reused until it has been replaced
4400 * by another handler.
4401 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004402int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004403{
4404 if (family >= NPROTO)
4405 return -EINVAL;
4406 gifconf_list[family] = gifconf;
4407 return 0;
4408}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004409EXPORT_SYMBOL(register_gifconf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004410
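/* Illustrative sketch, not part of dev.c: how an address family might
 * register its SIOCGIFCONF handler. The handler below is a hypothetical
 * stub and PF_EXAMPLE stands in for a real family constant such as
 * PF_INET; the gifconf_func_t signature and register_gifconf() are the
 * real interface (IPv4 does the equivalent with its inet_gifconf()).
 */
#if 0
static int example_gifconf(struct net_device *dev, char __user *buf, int len)
{
	/* With buf == NULL, return the space needed; otherwise fill buf
	 * with ifreq records for dev and return the bytes written.
	 */
	return 0;
}

static int __init example_family_init(void)
{
	return register_gifconf(PF_EXAMPLE, example_gifconf);
}
#endif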
4411
4412/*
4413 * Map an interface index to its name (SIOCGIFNAME)
4414 */
4415
4416/*
4417 * We need this ioctl for efficient implementation of the
4418 * if_indextoname() function required by the IPv6 API. Without
4419 * it, we would have to search all the interfaces to find a
4420 * match. --pb
4421 */
4422
Eric W. Biederman881d9662007-09-17 11:56:21 -07004423static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004424{
4425 struct net_device *dev;
4426 struct ifreq ifr;
Brian Haleyc91f6df2012-11-26 05:21:08 +00004427 unsigned seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004428
4429 /*
4430 * Fetch the caller's info block.
4431 */
4432
4433 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4434 return -EFAULT;
4435
Brian Haleyc91f6df2012-11-26 05:21:08 +00004436retry:
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00004437 seq = read_seqcount_begin(&devnet_rename_seq);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004438 rcu_read_lock();
4439 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004440 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004441 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442 return -ENODEV;
4443 }
4444
4445 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004446 rcu_read_unlock();
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00004447 if (read_seqcount_retry(&devnet_rename_seq, seq))
Brian Haleyc91f6df2012-11-26 05:21:08 +00004448 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004449
4450 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4451 return -EFAULT;
4452 return 0;
4453}
4454
4455/*
4456 * Perform a SIOCGIFCONF call. This structure will change
4457 * size eventually, and there is nothing I can do about it.
4458 * Thus we will need a 'compatibility mode'.
4459 */
4460
Eric W. Biederman881d9662007-09-17 11:56:21 -07004461static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462{
4463 struct ifconf ifc;
4464 struct net_device *dev;
4465 char __user *pos;
4466 int len;
4467 int total;
4468 int i;
4469
4470 /*
4471 * Fetch the caller's info block.
4472 */
4473
4474 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4475 return -EFAULT;
4476
4477 pos = ifc.ifc_buf;
4478 len = ifc.ifc_len;
4479
4480 /*
4481 * Loop over the interfaces, and write an info block for each.
4482 */
4483
4484 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004485 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486 for (i = 0; i < NPROTO; i++) {
4487 if (gifconf_list[i]) {
4488 int done;
4489 if (!pos)
4490 done = gifconf_list[i](dev, NULL, 0);
4491 else
4492 done = gifconf_list[i](dev, pos + total,
4493 len - total);
4494 if (done < 0)
4495 return -EFAULT;
4496 total += done;
4497 }
4498 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004499 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004500
4501 /*
4502 * All done. Write the updated control block back to the caller.
4503 */
4504 ifc.ifc_len = total;
4505
4506 /*
4507 * Both BSD and Solaris return 0 here, so we do too.
4508 */
4509 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4510}
4511
4512#ifdef CONFIG_PROC_FS
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004513
Eric Dumazet2def16a2012-04-02 22:33:02 +00004514#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004515
4516#define get_bucket(x) ((x) >> BUCKET_SPACE)
4517#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4518#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4519
Eric Dumazet2def16a2012-04-02 22:33:02 +00004520static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004521{
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004522 struct net *net = seq_file_net(seq);
4523 struct net_device *dev;
4524 struct hlist_node *p;
4525 struct hlist_head *h;
Eric Dumazet2def16a2012-04-02 22:33:02 +00004526 unsigned int count = 0, offset = get_offset(*pos);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004527
Eric Dumazet2def16a2012-04-02 22:33:02 +00004528 h = &net->dev_name_head[get_bucket(*pos)];
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004529 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
Eric Dumazet2def16a2012-04-02 22:33:02 +00004530 if (++count == offset)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004531 return dev;
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004532 }
4533
4534 return NULL;
4535}
4536
Eric Dumazet2def16a2012-04-02 22:33:02 +00004537static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004538{
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004539 struct net_device *dev;
4540 unsigned int bucket;
4541
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004542 do {
Eric Dumazet2def16a2012-04-02 22:33:02 +00004543 dev = dev_from_same_bucket(seq, pos);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004544 if (dev)
4545 return dev;
4546
Eric Dumazet2def16a2012-04-02 22:33:02 +00004547 bucket = get_bucket(*pos) + 1;
4548 *pos = set_bucket_offset(bucket, 1);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004549 } while (bucket < NETDEV_HASHENTRIES);
4550
4551 return NULL;
4552}
4553
Linus Torvalds1da177e2005-04-16 15:20:36 -07004554/*
4555 * This is invoked by the /proc filesystem handler to display a device
4556 * in detail.
4557 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004558void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004559 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004561 rcu_read_lock();
Pavel Emelianov7562f872007-05-03 15:13:45 -07004562 if (!*pos)
4563 return SEQ_START_TOKEN;
4564
Eric Dumazet2def16a2012-04-02 22:33:02 +00004565 if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004566 return NULL;
Pavel Emelianov7562f872007-05-03 15:13:45 -07004567
Eric Dumazet2def16a2012-04-02 22:33:02 +00004568 return dev_from_bucket(seq, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004569}
4570
4571void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4572{
4573 ++*pos;
Eric Dumazet2def16a2012-04-02 22:33:02 +00004574 return dev_from_bucket(seq, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004575}
4576
4577void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004578 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004579{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004580 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581}
4582
4583static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4584{
Eric Dumazet28172732010-07-07 14:58:56 -07004585 struct rtnl_link_stats64 temp;
4586 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004587
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +00004588 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4589 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
Rusty Russell5a1b5892007-04-28 21:04:03 -07004590 dev->name, stats->rx_bytes, stats->rx_packets,
4591 stats->rx_errors,
4592 stats->rx_dropped + stats->rx_missed_errors,
4593 stats->rx_fifo_errors,
4594 stats->rx_length_errors + stats->rx_over_errors +
4595 stats->rx_crc_errors + stats->rx_frame_errors,
4596 stats->rx_compressed, stats->multicast,
4597 stats->tx_bytes, stats->tx_packets,
4598 stats->tx_errors, stats->tx_dropped,
4599 stats->tx_fifo_errors, stats->collisions,
4600 stats->tx_carrier_errors +
4601 stats->tx_aborted_errors +
4602 stats->tx_window_errors +
4603 stats->tx_heartbeat_errors,
4604 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004605}
4606
4607/*
4608 * Called from the PROCfs module. This now uses the new arbitrary sized
4609 * /proc/net interface to create /proc/net/dev
4610 */
4611static int dev_seq_show(struct seq_file *seq, void *v)
4612{
4613 if (v == SEQ_START_TOKEN)
4614 seq_puts(seq, "Inter-| Receive "
4615 " | Transmit\n"
4616 " face |bytes packets errs drop fifo frame "
4617 "compressed multicast|bytes packets errs "
4618 "drop fifo colls carrier compressed\n");
4619 else
4620 dev_seq_printf_stats(seq, v);
4621 return 0;
4622}
4623
Changli Gaodee42872010-05-02 05:42:16 +00004624static struct softnet_data *softnet_get_online(loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004625{
Changli Gaodee42872010-05-02 05:42:16 +00004626 struct softnet_data *sd = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004628 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004629 if (cpu_online(*pos)) {
Changli Gaodee42872010-05-02 05:42:16 +00004630 sd = &per_cpu(softnet_data, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004631 break;
4632 } else
4633 ++*pos;
Changli Gaodee42872010-05-02 05:42:16 +00004634 return sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004635}
4636
4637static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4638{
4639 return softnet_get_online(pos);
4640}
4641
4642static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4643{
4644 ++*pos;
4645 return softnet_get_online(pos);
4646}
4647
4648static void softnet_seq_stop(struct seq_file *seq, void *v)
4649{
4650}
4651
4652static int softnet_seq_show(struct seq_file *seq, void *v)
4653{
Changli Gaodee42872010-05-02 05:42:16 +00004654 struct softnet_data *sd = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004655
Tom Herbert0a9627f2010-03-16 08:03:29 +00004656 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Changli Gaodee42872010-05-02 05:42:16 +00004657 sd->processed, sd->dropped, sd->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07004658 0, 0, 0, 0, /* was fastroute */
Changli Gaodee42872010-05-02 05:42:16 +00004659 sd->cpu_collision, sd->received_rps);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660 return 0;
4661}
4662
Stephen Hemmingerf6908082007-03-12 14:34:29 -07004663static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664 .start = dev_seq_start,
4665 .next = dev_seq_next,
4666 .stop = dev_seq_stop,
4667 .show = dev_seq_show,
4668};
4669
4670static int dev_seq_open(struct inode *inode, struct file *file)
4671{
Denis V. Luneve372c412007-11-19 22:31:54 -08004672 return seq_open_net(inode, file, &dev_seq_ops,
Eric Dumazet2def16a2012-04-02 22:33:02 +00004673 sizeof(struct seq_net_private));
Anton Blanchard5cac98d2011-11-27 21:14:46 +00004674}
4675
Arjan van de Ven9a321442007-02-12 00:55:35 -08004676static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677 .owner = THIS_MODULE,
4678 .open = dev_seq_open,
4679 .read = seq_read,
4680 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08004681 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682};
4683
Stephen Hemmingerf6908082007-03-12 14:34:29 -07004684static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004685 .start = softnet_seq_start,
4686 .next = softnet_seq_next,
4687 .stop = softnet_seq_stop,
4688 .show = softnet_seq_show,
4689};
4690
4691static int softnet_seq_open(struct inode *inode, struct file *file)
4692{
4693 return seq_open(file, &softnet_seq_ops);
4694}
4695
Arjan van de Ven9a321442007-02-12 00:55:35 -08004696static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697 .owner = THIS_MODULE,
4698 .open = softnet_seq_open,
4699 .read = seq_read,
4700 .llseek = seq_lseek,
4701 .release = seq_release,
4702};
4703
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004704static void *ptype_get_idx(loff_t pos)
4705{
4706 struct packet_type *pt = NULL;
4707 loff_t i = 0;
4708 int t;
4709
4710 list_for_each_entry_rcu(pt, &ptype_all, list) {
4711 if (i == pos)
4712 return pt;
4713 ++i;
4714 }
4715
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004716 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004717 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4718 if (i == pos)
4719 return pt;
4720 ++i;
4721 }
4722 }
4723 return NULL;
4724}
4725
4726static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08004727 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004728{
4729 rcu_read_lock();
4730 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4731}
4732
4733static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4734{
4735 struct packet_type *pt;
4736 struct list_head *nxt;
4737 int hash;
4738
4739 ++*pos;
4740 if (v == SEQ_START_TOKEN)
4741 return ptype_get_idx(0);
4742
4743 pt = v;
4744 nxt = pt->list.next;
4745 if (pt->type == htons(ETH_P_ALL)) {
4746 if (nxt != &ptype_all)
4747 goto found;
4748 hash = 0;
4749 nxt = ptype_base[0].next;
4750 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004751 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004752
4753 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004754 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004755 return NULL;
4756 nxt = ptype_base[hash].next;
4757 }
4758found:
4759 return list_entry(nxt, struct packet_type, list);
4760}
4761
4762static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08004763 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004764{
4765 rcu_read_unlock();
4766}
4767
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004768static int ptype_seq_show(struct seq_file *seq, void *v)
4769{
4770 struct packet_type *pt = v;
4771
4772 if (v == SEQ_START_TOKEN)
4773 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004774 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004775 if (pt->type == htons(ETH_P_ALL))
4776 seq_puts(seq, "ALL ");
4777 else
4778 seq_printf(seq, "%04x", ntohs(pt->type));
4779
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08004780 seq_printf(seq, " %-8s %pF\n",
4781 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004782 }
4783
4784 return 0;
4785}
4786
4787static const struct seq_operations ptype_seq_ops = {
4788 .start = ptype_seq_start,
4789 .next = ptype_seq_next,
4790 .stop = ptype_seq_stop,
4791 .show = ptype_seq_show,
4792};
4793
4794static int ptype_seq_open(struct inode *inode, struct file *file)
4795{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07004796 return seq_open_net(inode, file, &ptype_seq_ops,
4797 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004798}
4799
4800static const struct file_operations ptype_seq_fops = {
4801 .owner = THIS_MODULE,
4802 .open = ptype_seq_open,
4803 .read = seq_read,
4804 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07004805 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004806};
4807
4808
Pavel Emelyanov46650792007-10-08 20:38:39 -07004809static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004810{
4811 int rc = -ENOMEM;
4812
Eric W. Biederman881d9662007-09-17 11:56:21 -07004813 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004814 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004815 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004816 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004817 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004818 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004819
Eric W. Biederman881d9662007-09-17 11:56:21 -07004820 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004821 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004822 rc = 0;
4823out:
4824 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004825out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004826 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004827out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004828 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004829out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004830 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831 goto out;
4832}
Eric W. Biederman881d9662007-09-17 11:56:21 -07004833
Pavel Emelyanov46650792007-10-08 20:38:39 -07004834static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004835{
4836 wext_proc_exit(net);
4837
4838 proc_net_remove(net, "ptype");
4839 proc_net_remove(net, "softnet_stat");
4840 proc_net_remove(net, "dev");
4841}
4842
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004843static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004844 .init = dev_proc_net_init,
4845 .exit = dev_proc_net_exit,
4846};
4847
4848static int __init dev_proc_init(void)
4849{
4850 return register_pernet_subsys(&dev_proc_ops);
4851}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004852#else
4853#define dev_proc_init() 0
4854#endif /* CONFIG_PROC_FS */
4855
4856
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004857struct netdev_upper {
4858 struct net_device *dev;
4859 bool master;
4860 struct list_head list;
4861 struct rcu_head rcu;
4862 struct list_head search_list;
4863};
4864
4865static void __append_search_uppers(struct list_head *search_list,
4866 struct net_device *dev)
4867{
4868 struct netdev_upper *upper;
4869
4870 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4871 /* check if this upper is not already in the search list */
4872 if (list_empty(&upper->search_list))
4873 list_add_tail(&upper->search_list, search_list);
4874 }
4875}
4876
4877static bool __netdev_search_upper_dev(struct net_device *dev,
4878 struct net_device *upper_dev)
4879{
4880 LIST_HEAD(search_list);
4881 struct netdev_upper *upper;
4882 struct netdev_upper *tmp;
4883 bool ret = false;
4884
4885 __append_search_uppers(&search_list, dev);
4886 list_for_each_entry(upper, &search_list, search_list) {
4887 if (upper->dev == upper_dev) {
4888 ret = true;
4889 break;
4890 }
4891 __append_search_uppers(&search_list, upper->dev);
4892 }
4893 list_for_each_entry_safe(upper, tmp, &search_list, search_list)
4894 INIT_LIST_HEAD(&upper->search_list);
4895 return ret;
4896}
4897
4898static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
4899 struct net_device *upper_dev)
4900{
4901 struct netdev_upper *upper;
4902
4903 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4904 if (upper->dev == upper_dev)
4905 return upper;
4906 }
4907 return NULL;
4908}
4909
4910/**
4911 * netdev_has_upper_dev - Check if device is linked to an upper device
4912 * @dev: device
4913 * @upper_dev: upper device to check
4914 *
4915 * Find out if a device is linked to the specified upper device and return
4916 * true if it is. Note that this checks only the immediate upper device,
4917 * not the complete stack of devices. The caller must hold the RTNL lock.
4918 */
4919bool netdev_has_upper_dev(struct net_device *dev,
4920 struct net_device *upper_dev)
4921{
4922 ASSERT_RTNL();
4923
4924 return __netdev_find_upper(dev, upper_dev);
4925}
4926EXPORT_SYMBOL(netdev_has_upper_dev);
4927
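/* Illustrative sketch, not part of dev.c: a hypothetical sanity check
 * before stacking one device on another. Only netdev_has_upper_dev() and
 * the RTNL requirement are real; example_can_stack() is invented.
 */
#if 0
static bool example_can_stack(struct net_device *lower, struct net_device *upper)
{
	ASSERT_RTNL();

	/* Refuse if the pair is already directly linked. */
	return !netdev_has_upper_dev(lower, upper);
}
#endif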
4928/**
4929 * netdev_has_any_upper_dev - Check if device is linked to some device
4930 * @dev: device
4931 *
4932 * Find out if a device is linked to an upper device and return true if
4933 * it is. The caller must hold the RTNL lock.
4934 */
4935bool netdev_has_any_upper_dev(struct net_device *dev)
4936{
4937 ASSERT_RTNL();
4938
4939 return !list_empty(&dev->upper_dev_list);
4940}
4941EXPORT_SYMBOL(netdev_has_any_upper_dev);
4942
4943/**
4944 * netdev_master_upper_dev_get - Get master upper device
4945 * @dev: device
4946 *
4947 * Find a master upper device and return a pointer to it, or NULL if
4948 * there is none. The caller must hold the RTNL lock.
4949 */
4950struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4951{
4952 struct netdev_upper *upper;
4953
4954 ASSERT_RTNL();
4955
4956 if (list_empty(&dev->upper_dev_list))
4957 return NULL;
4958
4959 upper = list_first_entry(&dev->upper_dev_list,
4960 struct netdev_upper, list);
4961 if (likely(upper->master))
4962 return upper->dev;
4963 return NULL;
4964}
4965EXPORT_SYMBOL(netdev_master_upper_dev_get);
4966
4967/**
4968 * netdev_master_upper_dev_get_rcu - Get master upper device
4969 * @dev: device
4970 *
4971 * Find a master upper device and return a pointer to it, or NULL if
4972 * there is none. The caller must hold the RCU read lock.
4973 */
4974struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4975{
4976 struct netdev_upper *upper;
4977
4978 upper = list_first_or_null_rcu(&dev->upper_dev_list,
4979 struct netdev_upper, list);
4980 if (upper && likely(upper->master))
4981 return upper->dev;
4982 return NULL;
4983}
4984EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4985
4986static int __netdev_upper_dev_link(struct net_device *dev,
4987 struct net_device *upper_dev, bool master)
4988{
4989 struct netdev_upper *upper;
4990
4991 ASSERT_RTNL();
4992
4993 if (dev == upper_dev)
4994 return -EBUSY;
4995
4996 /* To prevent loops, check that dev is not an upper device of upper_dev. */
4997 if (__netdev_search_upper_dev(upper_dev, dev))
4998 return -EBUSY;
4999
5000 if (__netdev_find_upper(dev, upper_dev))
5001 return -EEXIST;
5002
5003 if (master && netdev_master_upper_dev_get(dev))
5004 return -EBUSY;
5005
5006 upper = kmalloc(sizeof(*upper), GFP_KERNEL);
5007 if (!upper)
5008 return -ENOMEM;
5009
5010 upper->dev = upper_dev;
5011 upper->master = master;
5012 INIT_LIST_HEAD(&upper->search_list);
5013
5014 /* Ensure that the master upper link is always the first item in the list. */
5015 if (master)
5016 list_add_rcu(&upper->list, &dev->upper_dev_list);
5017 else
5018 list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
5019 dev_hold(upper_dev);
5020
5021 return 0;
5022}
5023
5024/**
5025 * netdev_upper_dev_link - Add a link to the upper device
5026 * @dev: device
5027 * @upper_dev: new upper device
5028 *
5029 * Adds a link to a device which is upper to this one. The caller must hold
5030 * the RTNL lock. On a failure a negative errno code is returned.
5031 * On success the reference counts are adjusted and the function
5032 * returns zero.
5033 */
5034int netdev_upper_dev_link(struct net_device *dev,
5035 struct net_device *upper_dev)
5036{
5037 return __netdev_upper_dev_link(dev, upper_dev, false);
5038}
5039EXPORT_SYMBOL(netdev_upper_dev_link);
5040
5041/**
5042 * netdev_master_upper_dev_link - Add a master link to the upper device
5043 * @dev: device
5044 * @upper_dev: new upper device
5045 *
5046 * Adds a link to a device which is upper to this one. In this case, only
5047 * one master upper device can be linked, although other non-master devices
5048 * might be linked as well. The caller must hold the RTNL lock.
5049 * On a failure a negative errno code is returned. On success the reference
5050 * counts are adjusted and the function returns zero.
5051 */
5052int netdev_master_upper_dev_link(struct net_device *dev,
5053 struct net_device *upper_dev)
5054{
5055 return __netdev_upper_dev_link(dev, upper_dev, true);
5056}
5057EXPORT_SYMBOL(netdev_master_upper_dev_link);
5058
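/* Illustrative sketch, not part of dev.c: a bonding/team-style enslave
 * and release path built on the upper-device API. example_* names are
 * hypothetical; netdev_master_upper_dev_link() and
 * netdev_upper_dev_unlink() are the real calls, both under RTNL.
 */
#if 0
static int example_enslave(struct net_device *master, struct net_device *slave)
{
	int err;

	ASSERT_RTNL();
	err = netdev_master_upper_dev_link(slave, master);
	if (err)
		return err;	/* -EBUSY on loops or a second master */

	/* ... program hardware, sync addresses, etc. ... */
	return 0;
}

static void example_release(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(slave, master);
}
#endif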
5059/**
5060 * netdev_upper_dev_unlink - Removes a link to upper device
5061 * @dev: device
5062 * @upper_dev: upper device to unlink
5063 *
5064 * Removes a link to a device which is upper to this one. The caller must hold
5065 * the RTNL lock.
5066 */
5067void netdev_upper_dev_unlink(struct net_device *dev,
5068 struct net_device *upper_dev)
5069{
5070 struct netdev_upper *upper;
5071
5072 ASSERT_RTNL();
5073
5074 upper = __netdev_find_upper(dev, upper_dev);
5075 if (!upper)
5076 return;
5077 list_del_rcu(&upper->list);
5078 dev_put(upper_dev);
5079 kfree_rcu(upper, rcu);
5080}
5081EXPORT_SYMBOL(netdev_upper_dev_unlink);
5082
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005083static void dev_change_rx_flags(struct net_device *dev, int flags)
5084{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005085 const struct net_device_ops *ops = dev->netdev_ops;
5086
5087 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
5088 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005089}
5090
Wang Chendad9b332008-06-18 01:48:28 -07005091static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07005092{
Eric Dumazetb536db92011-11-30 21:42:26 +00005093 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005094 kuid_t uid;
5095 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07005096
Patrick McHardy24023452007-07-14 18:51:31 -07005097 ASSERT_RTNL();
5098
Wang Chendad9b332008-06-18 01:48:28 -07005099 dev->flags |= IFF_PROMISC;
5100 dev->promiscuity += inc;
5101 if (dev->promiscuity == 0) {
5102 /*
5103 * Avoid overflow.
5104 * If inc causes overflow, leave promiscuity untouched and return an error.
5105 */
5106 if (inc < 0)
5107 dev->flags &= ~IFF_PROMISC;
5108 else {
5109 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005110 pr_warn("%s: promiscuity counter overflowed; set promiscuity failed. Promiscuity feature of device might be broken.\n",
5111 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005112 return -EOVERFLOW;
5113 }
5114 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005115 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005116 pr_info("device %s %s promiscuous mode\n",
5117 dev->name,
5118 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11005119 if (audit_enabled) {
5120 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005121 audit_log(current->audit_context, GFP_ATOMIC,
5122 AUDIT_ANOM_PROMISCUOUS,
5123 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5124 dev->name, (dev->flags & IFF_PROMISC),
5125 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07005126 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005127 from_kuid(&init_user_ns, uid),
5128 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005129 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11005130 }
Patrick McHardy24023452007-07-14 18:51:31 -07005131
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005132 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07005133 }
Wang Chendad9b332008-06-18 01:48:28 -07005134 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005135}
5136
Linus Torvalds1da177e2005-04-16 15:20:36 -07005137/**
5138 * dev_set_promiscuity - update promiscuity count on a device
5139 * @dev: device
5140 * @inc: modifier
5141 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07005142 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005143 * remains above zero the interface remains promiscuous. Once it hits zero
5144 * the device reverts to normal filtering operation. A negative inc
5145 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07005146 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147 */
Wang Chendad9b332008-06-18 01:48:28 -07005148int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005149{
Eric Dumazetb536db92011-11-30 21:42:26 +00005150 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07005151 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005152
Wang Chendad9b332008-06-18 01:48:28 -07005153 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07005154 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07005155 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07005156 if (dev->flags != old_flags)
5157 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07005158 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005159}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005160EXPORT_SYMBOL(dev_set_promiscuity);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161
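/* Illustrative sketch, not part of dev.c: balanced use of the promiscuity
 * counter, e.g. around a hypothetical packet-capture session. The
 * example_* wrappers are invented; dev_set_promiscuity() with +1/-1 under
 * RTNL is the real usage pattern.
 */
#if 0
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);	/* must balance the earlier +1 */
	rtnl_unlock();
}
#endif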
5162/**
5163 * dev_set_allmulti - update allmulti count on a device
5164 * @dev: device
5165 * @inc: modifier
5166 *
5167 * Add or remove reception of all multicast frames to a device. While the
5168 * count in the device remains above zero the interface keeps receiving
5169 * all multicast frames. Once it hits zero the device reverts to normal
5170 * filtering operation. A negative @inc value is used to drop the counter
5171 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07005172 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005173 */
5174
Wang Chendad9b332008-06-18 01:48:28 -07005175int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005176{
Eric Dumazetb536db92011-11-30 21:42:26 +00005177 unsigned int old_flags = dev->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178
Patrick McHardy24023452007-07-14 18:51:31 -07005179 ASSERT_RTNL();
5180
Linus Torvalds1da177e2005-04-16 15:20:36 -07005181 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07005182 dev->allmulti += inc;
5183 if (dev->allmulti == 0) {
5184 /*
5185 * Avoid overflow.
5186 * If inc causes overflow, leave allmulti untouched and return an error.
5187 */
5188 if (inc < 0)
5189 dev->flags &= ~IFF_ALLMULTI;
5190 else {
5191 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005192 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5193 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005194 return -EOVERFLOW;
5195 }
5196 }
Patrick McHardy24023452007-07-14 18:51:31 -07005197 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005198 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07005199 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07005200 }
Wang Chendad9b332008-06-18 01:48:28 -07005201 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005202}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005203EXPORT_SYMBOL(dev_set_allmulti);
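/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * pairing dev_set_allmulti() increments and decrements, as a tunnel or
 * virtual device might do when it starts or stops needing every
 * multicast frame from its lower device.
 */
static int example_need_allmulti(struct net_device *lower_dev, bool on)
{
	ASSERT_RTNL();

	/* the counter must be balanced: every +1 needs a later -1 */
	return dev_set_allmulti(lower_dev, on ? 1 : -1);
}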
Patrick McHardy4417da62007-06-27 01:28:10 -07005204
5205/*
5206 * Upload unicast and multicast address lists to device and
5207 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08005208 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07005209 * are present.
5210 */
5211void __dev_set_rx_mode(struct net_device *dev)
5212{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005213 const struct net_device_ops *ops = dev->netdev_ops;
5214
Patrick McHardy4417da62007-06-27 01:28:10 -07005215 /* dev_open will call this function so the list will stay sane. */
5216 if (!(dev->flags&IFF_UP))
5217 return;
5218
5219 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09005220 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07005221
Jiri Pirko01789342011-08-16 06:29:00 +00005222 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005223 /* Unicast address changes may only happen under the rtnl,
5224 * therefore calling __dev_set_promiscuity here is safe.
5225 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005226 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005227 __dev_set_promiscuity(dev, 1);
Joe Perches2d348d12011-07-25 16:17:35 -07005228 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005229 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005230 __dev_set_promiscuity(dev, -1);
Joe Perches2d348d12011-07-25 16:17:35 -07005231 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07005232 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005233 }
Jiri Pirko01789342011-08-16 06:29:00 +00005234
5235 if (ops->ndo_set_rx_mode)
5236 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005237}
5238
5239void dev_set_rx_mode(struct net_device *dev)
5240{
David S. Millerb9e40852008-07-15 00:15:08 -07005241 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005242 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07005243 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005244}
5245
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005246/**
5247 * dev_get_flags - get flags reported to userspace
5248 * @dev: device
5249 *
5250 * Get the combination of flag bits exported through APIs to userspace.
5251 */
Eric Dumazet95c96172012-04-15 05:58:06 +00005252unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005253{
Eric Dumazet95c96172012-04-15 05:58:06 +00005254 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005255
5256 flags = (dev->flags & ~(IFF_PROMISC |
5257 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08005258 IFF_RUNNING |
5259 IFF_LOWER_UP |
5260 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261 (dev->gflags & (IFF_PROMISC |
5262 IFF_ALLMULTI));
5263
Stefan Rompfb00055a2006-03-20 17:09:11 -08005264 if (netif_running(dev)) {
5265 if (netif_oper_up(dev))
5266 flags |= IFF_RUNNING;
5267 if (netif_carrier_ok(dev))
5268 flags |= IFF_LOWER_UP;
5269 if (netif_dormant(dev))
5270 flags |= IFF_DORMANT;
5271 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005272
5273 return flags;
5274}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005275EXPORT_SYMBOL(dev_get_flags);
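/*
 * Illustrative sketch (hypothetical, not in the original file): a
 * reader of the userspace-visible flag set. Note how IFF_RUNNING,
 * IFF_LOWER_UP and IFF_DORMANT are synthesized by dev_get_flags()
 * above rather than stored in dev->flags itself.
 */
static bool example_link_is_usable(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}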
Linus Torvalds1da177e2005-04-16 15:20:36 -07005276
Patrick McHardybd380812010-02-26 06:34:53 +00005277int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005278{
Eric Dumazetb536db92011-11-30 21:42:26 +00005279 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005280 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005281
Patrick McHardy24023452007-07-14 18:51:31 -07005282 ASSERT_RTNL();
5283
Linus Torvalds1da177e2005-04-16 15:20:36 -07005284 /*
5285 * Set the flags on our device.
5286 */
5287
5288 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5289 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5290 IFF_AUTOMEDIA)) |
5291 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5292 IFF_ALLMULTI));
5293
5294 /*
5295 * Load in the correct multicast list now that the flags have changed.
5296 */
5297
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005298 if ((old_flags ^ flags) & IFF_MULTICAST)
5299 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07005300
Patrick McHardy4417da62007-06-27 01:28:10 -07005301 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302
5303 /*
5304 * Have we downed the interface? We handle IFF_UP ourselves
5305 * according to user attempts to set it, rather than blindly
5306 * setting it.
5307 */
5308
5309 ret = 0;
5310 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00005311 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005312
5313 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07005314 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005315 }
5316
Linus Torvalds1da177e2005-04-16 15:20:36 -07005317 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005318 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5319
Linus Torvalds1da177e2005-04-16 15:20:36 -07005320 dev->gflags ^= IFF_PROMISC;
5321 dev_set_promiscuity(dev, inc);
5322 }
5323
5324 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5325 is important. Some (broken) drivers set IFF_PROMISC when
5326 IFF_ALLMULTI is requested, without asking us and without reporting it.
5327 */
5328 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005329 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5330
Linus Torvalds1da177e2005-04-16 15:20:36 -07005331 dev->gflags ^= IFF_ALLMULTI;
5332 dev_set_allmulti(dev, inc);
5333 }
5334
Patrick McHardybd380812010-02-26 06:34:53 +00005335 return ret;
5336}
5337
5338void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
5339{
5340 unsigned int changes = dev->flags ^ old_flags;
5341
5342 if (changes & IFF_UP) {
5343 if (dev->flags & IFF_UP)
5344 call_netdevice_notifiers(NETDEV_UP, dev);
5345 else
5346 call_netdevice_notifiers(NETDEV_DOWN, dev);
5347 }
5348
5349 if (dev->flags & IFF_UP &&
5350 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
5351 call_netdevice_notifiers(NETDEV_CHANGE, dev);
5352}
5353
5354/**
5355 * dev_change_flags - change device settings
5356 * @dev: device
5357 * @flags: device state flags
5358 *
5359 * Change settings on a device based on state flags. The flags are
5360 * in the userspace-exported format.
5361 */
Eric Dumazetb536db92011-11-30 21:42:26 +00005362int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00005363{
Eric Dumazetb536db92011-11-30 21:42:26 +00005364 int ret;
5365 unsigned int changes, old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005366
5367 ret = __dev_change_flags(dev, flags);
5368 if (ret < 0)
5369 return ret;
5370
5371 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07005372 if (changes)
5373 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005374
Patrick McHardybd380812010-02-26 06:34:53 +00005375 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005376 return ret;
5377}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005378EXPORT_SYMBOL(dev_change_flags);
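/*
 * Illustrative sketch (hypothetical helper): bringing an interface
 * administratively up from kernel code via dev_change_flags(), the
 * same path SIOCSIFFLAGS takes. RTNL must be held, and the flags
 * argument is the full desired flag word, not a delta.
 */
static int example_bring_up(struct net_device *dev)
{
	ASSERT_RTNL();

	return dev_change_flags(dev, dev->flags | IFF_UP);
}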
Linus Torvalds1da177e2005-04-16 15:20:36 -07005379
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005380/**
5381 * dev_set_mtu - Change maximum transmission unit
5382 * @dev: device
5383 * @new_mtu: new transfer unit
5384 *
5385 * Change the maximum transfer size of the network device.
5386 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005387int dev_set_mtu(struct net_device *dev, int new_mtu)
5388{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005389 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005390 int err;
5391
5392 if (new_mtu == dev->mtu)
5393 return 0;
5394
5395 /* MTU must be positive. */
5396 if (new_mtu < 0)
5397 return -EINVAL;
5398
5399 if (!netif_device_present(dev))
5400 return -ENODEV;
5401
5402 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005403 if (ops->ndo_change_mtu)
5404 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005405 else
5406 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005407
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00005408 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005409 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005410 return err;
5411}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005412EXPORT_SYMBOL(dev_set_mtu);
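/*
 * Illustrative sketch (hypothetical helper): applying a new MTU from
 * kernel code. dev_set_mtu() itself only rejects negative values, so
 * a caller with its own upper bound must enforce it first; the 9000
 * jumbo-frame limit here is an assumption made for the example.
 */
static int example_set_jumbo_mtu(struct net_device *dev, int mtu)
{
	ASSERT_RTNL();

	if (mtu > 9000)
		return -EINVAL;
	return dev_set_mtu(dev, mtu);
}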
Linus Torvalds1da177e2005-04-16 15:20:36 -07005413
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005414/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005415 * dev_set_group - Change group this device belongs to
5416 * @dev: device
5417 * @new_group: group this device should belong to
5418 */
5419void dev_set_group(struct net_device *dev, int new_group)
5420{
5421 dev->group = new_group;
5422}
5423EXPORT_SYMBOL(dev_set_group);
5424
5425/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005426 * dev_set_mac_address - Change Media Access Control Address
5427 * @dev: device
5428 * @sa: new address
5429 *
5430 * Change the hardware (MAC) address of the device
5431 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005432int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5433{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005434 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435 int err;
5436
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005437 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005438 return -EOPNOTSUPP;
5439 if (sa->sa_family != dev->type)
5440 return -EINVAL;
5441 if (!netif_device_present(dev))
5442 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005443 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00005444 if (err)
5445 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00005446 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00005447 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005448 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00005449 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005450}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005451EXPORT_SYMBOL(dev_set_mac_address);
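/*
 * Illustrative sketch (hypothetical helper): programming a new MAC
 * address from kernel code. The sockaddr must carry the device's own
 * sa_family (dev->type), mirroring the SIOCSIFHWADDR handling above;
 * we assume addr_len fits in sa.sa_data, which holds for Ethernet.
 */
static int example_set_mac(struct net_device *dev, const u8 *addr)
{
	struct sockaddr sa;

	ASSERT_RTNL();

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, addr, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}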
Linus Torvalds1da177e2005-04-16 15:20:36 -07005452
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005453/**
5454 * dev_change_carrier - Change device carrier
5455 * @dev: device
5456 * @new_carrier: new value
5457 *
5458 * Change device carrier
5459 */
5460int dev_change_carrier(struct net_device *dev, bool new_carrier)
5461{
5462 const struct net_device_ops *ops = dev->netdev_ops;
5463
5464 if (!ops->ndo_change_carrier)
5465 return -EOPNOTSUPP;
5466 if (!netif_device_present(dev))
5467 return -ENODEV;
5468 return ops->ndo_change_carrier(dev, new_carrier);
5469}
5470EXPORT_SYMBOL(dev_change_carrier);
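/*
 * Illustrative sketch (hypothetical helper): forcing carrier down on
 * a device from kernel code. Only devices that implement
 * ndo_change_carrier accept this; all others return -EOPNOTSUPP, as
 * the code above shows.
 */
static int example_force_carrier_off(struct net_device *dev)
{
	return dev_change_carrier(dev, false);
}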
5471
Linus Torvalds1da177e2005-04-16 15:20:36 -07005472/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00005473 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07005474 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07005475static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005476{
5477 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00005478 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005479
5480 if (!dev)
5481 return -ENODEV;
5482
5483 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005484 case SIOCGIFFLAGS: /* Get interface flags */
5485 ifr->ifr_flags = (short) dev_get_flags(dev);
5486 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005487
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005488 case SIOCGIFMETRIC: /* Get the metric on the interface
5489 (currently unused) */
5490 ifr->ifr_metric = 0;
5491 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005492
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005493 case SIOCGIFMTU: /* Get the MTU of a device */
5494 ifr->ifr_mtu = dev->mtu;
5495 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005496
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005497 case SIOCGIFHWADDR:
5498 if (!dev->addr_len)
5499 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
5500 else
5501 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
5502 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5503 ifr->ifr_hwaddr.sa_family = dev->type;
5504 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005505
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005506 case SIOCGIFSLAVE:
5507 err = -EINVAL;
5508 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005509
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005510 case SIOCGIFMAP:
5511 ifr->ifr_map.mem_start = dev->mem_start;
5512 ifr->ifr_map.mem_end = dev->mem_end;
5513 ifr->ifr_map.base_addr = dev->base_addr;
5514 ifr->ifr_map.irq = dev->irq;
5515 ifr->ifr_map.dma = dev->dma;
5516 ifr->ifr_map.port = dev->if_port;
5517 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005518
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005519 case SIOCGIFINDEX:
5520 ifr->ifr_ifindex = dev->ifindex;
5521 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005522
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005523 case SIOCGIFTXQLEN:
5524 ifr->ifr_qlen = dev->tx_queue_len;
5525 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005526
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005527 default:
5528 /* dev_ioctl() should ensure this case
5529 * is never reached
5530 */
5531 WARN_ON(1);
Lifeng Sun41c31f32011-04-27 22:04:51 +00005532 err = -ENOTTY;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005533 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005534
5535 }
5536 return err;
5537}
5538
5539/*
5540 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
5541 */
5542static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
5543{
5544 int err;
5545 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08005546 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005547
5548 if (!dev)
5549 return -ENODEV;
5550
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08005551 ops = dev->netdev_ops;
5552
Jeff Garzik14e3e072007-10-08 00:06:32 -07005553 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005554 case SIOCSIFFLAGS: /* Set interface flags */
5555 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07005556
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005557 case SIOCSIFMETRIC: /* Set the metric on the interface
5558 (currently unused) */
5559 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005560
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005561 case SIOCSIFMTU: /* Set the MTU of a device */
5562 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07005563
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005564 case SIOCSIFHWADDR:
5565 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005566
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005567 case SIOCSIFHWBROADCAST:
5568 if (ifr->ifr_hwaddr.sa_family != dev->type)
5569 return -EINVAL;
5570 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
5571 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5572 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5573 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005574
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005575 case SIOCSIFMAP:
5576 if (ops->ndo_set_config) {
5577 if (!netif_device_present(dev))
5578 return -ENODEV;
5579 return ops->ndo_set_config(dev, &ifr->ifr_map);
5580 }
5581 return -EOPNOTSUPP;
5582
5583 case SIOCADDMULTI:
Jiri Pirkob81693d2011-08-16 06:29:02 +00005584 if (!ops->ndo_set_rx_mode ||
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005585 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5586 return -EINVAL;
5587 if (!netif_device_present(dev))
5588 return -ENODEV;
Jiri Pirko22bedad32010-04-01 21:22:57 +00005589 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005590
5591 case SIOCDELMULTI:
Jiri Pirkob81693d2011-08-16 06:29:02 +00005592 if (!ops->ndo_set_rx_mode ||
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005593 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5594 return -EINVAL;
5595 if (!netif_device_present(dev))
5596 return -ENODEV;
Jiri Pirko22bedad32010-04-01 21:22:57 +00005597 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005598
5599 case SIOCSIFTXQLEN:
5600 if (ifr->ifr_qlen < 0)
5601 return -EINVAL;
5602 dev->tx_queue_len = ifr->ifr_qlen;
5603 return 0;
5604
5605 case SIOCSIFNAME:
5606 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5607 return dev_change_name(dev, ifr->ifr_newname);
5608
Richard Cochran4dc360c2011-10-19 17:00:35 -04005609 case SIOCSHWTSTAMP:
5610 err = net_hwtstamp_validate(ifr);
5611 if (err)
5612 return err;
5613 /* fall through */
5614
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005615 /*
5616 * Unknown or private ioctl
5617 */
5618 default:
5619 if ((cmd >= SIOCDEVPRIVATE &&
5620 cmd <= SIOCDEVPRIVATE + 15) ||
5621 cmd == SIOCBONDENSLAVE ||
5622 cmd == SIOCBONDRELEASE ||
5623 cmd == SIOCBONDSETHWADDR ||
5624 cmd == SIOCBONDSLAVEINFOQUERY ||
5625 cmd == SIOCBONDINFOQUERY ||
5626 cmd == SIOCBONDCHANGEACTIVE ||
5627 cmd == SIOCGMIIPHY ||
5628 cmd == SIOCGMIIREG ||
5629 cmd == SIOCSMIIREG ||
5630 cmd == SIOCBRADDIF ||
5631 cmd == SIOCBRDELIF ||
5632 cmd == SIOCSHWTSTAMP ||
5633 cmd == SIOCWANDEV) {
5634 err = -EOPNOTSUPP;
5635 if (ops->ndo_do_ioctl) {
5636 if (netif_device_present(dev))
5637 err = ops->ndo_do_ioctl(dev, ifr, cmd);
5638 else
5639 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005640 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005641 } else
5642 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005643
5644 }
5645 return err;
5646}
5647
5648/*
5649 * This function handles all "interface"-type I/O control requests. The actual
5650 * 'doing' part of this is dev_ifsioc above.
5651 */
5652
5653/**
5654 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005655 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005656 * @cmd: command to issue
5657 * @arg: pointer to a struct ifreq in user space
5658 *
5659 * Issue ioctl functions to devices. This is normally called by the
5660 * user space syscall interfaces but can sometimes be useful for
5661 * other purposes. The return value is the return from the syscall if
5662 * positive or a negative errno code on error.
5663 */
5664
Eric W. Biederman881d9662007-09-17 11:56:21 -07005665int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005666{
5667 struct ifreq ifr;
5668 int ret;
5669 char *colon;
5670
5671 /* One special case: SIOCGIFCONF takes an ifconf argument
5672 and requires the shared lock, because it sleeps writing
5673 to user space.
5674 */
5675
5676 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005677 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005678 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005679 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005680 return ret;
5681 }
5682 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005683 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005684
5685 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5686 return -EFAULT;
5687
5688 ifr.ifr_name[IFNAMSIZ-1] = 0;
5689
5690 colon = strchr(ifr.ifr_name, ':');
5691 if (colon)
5692 *colon = 0;
5693
5694 /*
5695 * See which interface the caller is talking about.
5696 */
5697
5698 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005699 /*
5700 * These ioctl calls:
5701 * - can be done by all.
5702 * - atomic and do not require locking.
5703 * - return a value
5704 */
5705 case SIOCGIFFLAGS:
5706 case SIOCGIFMETRIC:
5707 case SIOCGIFMTU:
5708 case SIOCGIFHWADDR:
5709 case SIOCGIFSLAVE:
5710 case SIOCGIFMAP:
5711 case SIOCGIFINDEX:
5712 case SIOCGIFTXQLEN:
5713 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00005714 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005715 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00005716 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005717 if (!ret) {
5718 if (colon)
5719 *colon = ':';
5720 if (copy_to_user(arg, &ifr,
5721 sizeof(struct ifreq)))
5722 ret = -EFAULT;
5723 }
5724 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005725
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005726 case SIOCETHTOOL:
5727 dev_load(net, ifr.ifr_name);
5728 rtnl_lock();
5729 ret = dev_ethtool(net, &ifr);
5730 rtnl_unlock();
5731 if (!ret) {
5732 if (colon)
5733 *colon = ':';
5734 if (copy_to_user(arg, &ifr,
5735 sizeof(struct ifreq)))
5736 ret = -EFAULT;
5737 }
5738 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005739
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005740 /*
5741 * These ioctl calls:
5742 * - require superuser power.
5743 * - require strict serialization.
5744 * - return a value
5745 */
5746 case SIOCGMIIPHY:
5747 case SIOCGMIIREG:
5748 case SIOCSIFNAME:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005749 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005750 return -EPERM;
5751 dev_load(net, ifr.ifr_name);
5752 rtnl_lock();
5753 ret = dev_ifsioc(net, &ifr, cmd);
5754 rtnl_unlock();
5755 if (!ret) {
5756 if (colon)
5757 *colon = ':';
5758 if (copy_to_user(arg, &ifr,
5759 sizeof(struct ifreq)))
5760 ret = -EFAULT;
5761 }
5762 return ret;
5763
5764 /*
5765 * These ioctl calls:
5766 * - require superuser power.
5767 * - require strict serialization.
5768 * - do not return a value
5769 */
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005770 case SIOCSIFMAP:
5771 case SIOCSIFTXQLEN:
5772 if (!capable(CAP_NET_ADMIN))
5773 return -EPERM;
5774 /* fall through */
5775 /*
5776 * These ioctl calls:
5777 * - require local superuser power.
5778 * - require strict serialization.
5779 * - do not return a value
5780 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005781 case SIOCSIFFLAGS:
5782 case SIOCSIFMETRIC:
5783 case SIOCSIFMTU:
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005784 case SIOCSIFHWADDR:
5785 case SIOCSIFSLAVE:
5786 case SIOCADDMULTI:
5787 case SIOCDELMULTI:
5788 case SIOCSIFHWBROADCAST:
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005789 case SIOCSMIIREG:
5790 case SIOCBONDENSLAVE:
5791 case SIOCBONDRELEASE:
5792 case SIOCBONDSETHWADDR:
5793 case SIOCBONDCHANGEACTIVE:
5794 case SIOCBRADDIF:
5795 case SIOCBRDELIF:
5796 case SIOCSHWTSTAMP:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005797 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005798 return -EPERM;
5799 /* fall through */
5800 case SIOCBONDSLAVEINFOQUERY:
5801 case SIOCBONDINFOQUERY:
5802 dev_load(net, ifr.ifr_name);
5803 rtnl_lock();
5804 ret = dev_ifsioc(net, &ifr, cmd);
5805 rtnl_unlock();
5806 return ret;
5807
5808 case SIOCGIFMEM:
5809 /* Get the per device memory space. We can add this but
5810 * currently do not support it */
5811 case SIOCSIFMEM:
5812 /* Set the per device memory buffer space.
5813 * Not applicable in our case */
5814 case SIOCSIFLINK:
Lifeng Sun41c31f32011-04-27 22:04:51 +00005815 return -ENOTTY;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005816
5817 /*
5818 * Unknown or private ioctl.
5819 */
5820 default:
5821 if (cmd == SIOCWANDEV ||
5822 (cmd >= SIOCDEVPRIVATE &&
5823 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005824 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005825 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005826 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005827 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005828 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005829 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005830 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005831 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005832 }
5833 /* Take care of Wireless Extensions */
5834 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5835 return wext_handle_ioctl(net, &ifr, cmd, arg);
Lifeng Sun41c31f32011-04-27 22:04:51 +00005836 return -ENOTTY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005837 }
5838}
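/*
 * Illustrative userspace sketch (not part of this file): the caller
 * side of the SIOCGIFMTU path handled by dev_ifsioc_locked() above.
 * Any socket fd works as the ioctl target; the "eth0" name is an
 * assumption made for the example.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *			printf("mtu %d\n", ifr.ifr_mtu);
 *		close(fd);
 *		return 0;
 *	}
 */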
5839
5840
5841/**
5842 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005843 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005844 *
5845 * Returns a suitable unique value for a new device interface
5846 * number. The caller must hold the rtnl semaphore or the
5847 * dev_base_lock to be sure it remains unique.
5848 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005849static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005850{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005851 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005852 for (;;) {
5853 if (++ifindex <= 0)
5854 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005855 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005856 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005857 }
5858}
5859
Linus Torvalds1da177e2005-04-16 15:20:36 -07005860/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005861static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005862
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005863static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005864{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005865 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005866}
5867
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005868static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005869{
Krishna Kumare93737b2009-12-08 22:26:02 +00005870 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005871
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005872 BUG_ON(dev_boot_phase);
5873 ASSERT_RTNL();
5874
Krishna Kumare93737b2009-12-08 22:26:02 +00005875 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005876 /* Some devices call this without ever having registered,
Krishna Kumare93737b2009-12-08 22:26:02 +00005877 * as an initialization unwind. Remove those
5878 * devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005879 */
5880 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005881 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5882 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005883
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005884 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00005885 list_del(&dev->unreg_list);
5886 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005887 }
Eric Dumazet449f4542011-05-19 12:24:16 +00005888 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005889 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00005890 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005891
Octavian Purdila44345722010-12-13 12:44:07 +00005892 /* If device is running, close it first. */
5893 dev_close_many(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005894
Octavian Purdila44345722010-12-13 12:44:07 +00005895 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005896 /* And unlink it from device chain. */
5897 unlist_netdevice(dev);
5898
5899 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005900 }
5901
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005902 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005903
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005904 list_for_each_entry(dev, head, unreg_list) {
5905 /* Shutdown queueing discipline. */
5906 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005907
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005908
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005909 /* Notify protocols that we are about to destroy
5910 this device. They should clean up all their state.
5911 */
5912 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5913
Patrick McHardya2835762010-02-26 06:34:51 +00005914 if (!dev->rtnl_link_ops ||
5915 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5916 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5917
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005918 /*
5919 * Flush the unicast and multicast chains
5920 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005921 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005922 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005923
5924 if (dev->netdev_ops->ndo_uninit)
5925 dev->netdev_ops->ndo_uninit(dev);
5926
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005927 /* The notifier chain MUST have detached all upper devices from us. */
5928 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005929
5930 /* Remove entries from kobject tree */
5931 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00005932#ifdef CONFIG_XPS
5933 /* Remove XPS queueing entries */
5934 netif_reset_xps_queues_gt(dev, 0);
5935#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005936 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005937
Eric W. Biederman850a5452011-10-13 22:25:23 +00005938 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005939
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005940 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005941 dev_put(dev);
5942}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005943
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005944static void rollback_registered(struct net_device *dev)
5945{
5946 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005947
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005948 list_add(&dev->unreg_list, &single);
5949 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00005950 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005951}
5952
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005953static netdev_features_t netdev_fix_features(struct net_device *dev,
5954 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07005955{
Michał Mirosław57422dc2011-01-22 12:14:12 +00005956 /* Fix illegal checksum combinations */
5957 if ((features & NETIF_F_HW_CSUM) &&
5958 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005959 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00005960 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5961 }
5962
Herbert Xub63365a2008-10-23 01:11:29 -07005963 /* Fix illegal SG+CSUM combinations. */
5964 if ((features & NETIF_F_SG) &&
5965 !(features & NETIF_F_ALL_CSUM)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005966 netdev_dbg(dev,
5967 "Dropping NETIF_F_SG since no checksum feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005968 features &= ~NETIF_F_SG;
5969 }
5970
5971 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005972 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005973 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005974 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07005975 }
5976
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005977 /* TSO ECN requires that TSO is present as well. */
5978 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5979 features &= ~NETIF_F_TSO_ECN;
5980
Michał Mirosław212b5732011-02-15 16:59:16 +00005981 /* Software GSO depends on SG. */
5982 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005983 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005984 features &= ~NETIF_F_GSO;
5985 }
5986
Michał Mirosławacd11302011-01-24 15:45:15 -08005987 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005988 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005989 /* maybe split UFO into V4 and V6? */
5990 if (!((features & NETIF_F_GEN_CSUM) ||
5991 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5992 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005993 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005994 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005995 features &= ~NETIF_F_UFO;
5996 }
5997
5998 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005999 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08006000 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07006001 features &= ~NETIF_F_UFO;
6002 }
6003 }
6004
6005 return features;
6006}
Herbert Xub63365a2008-10-23 01:11:29 -07006007
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006008int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00006009{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006010 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00006011 int err = 0;
6012
Michał Mirosław87267482011-04-12 09:56:38 +00006013 ASSERT_RTNL();
6014
Michał Mirosław5455c692011-02-15 16:59:17 +00006015 features = netdev_get_wanted_features(dev);
6016
6017 if (dev->netdev_ops->ndo_fix_features)
6018 features = dev->netdev_ops->ndo_fix_features(dev, features);
6019
6020 /* driver might be less strict about feature dependencies */
6021 features = netdev_fix_features(dev, features);
6022
6023 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006024 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00006025
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006026 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6027 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00006028
6029 if (dev->netdev_ops->ndo_set_features)
6030 err = dev->netdev_ops->ndo_set_features(dev, features);
6031
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006032 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00006033 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006034 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6035 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006036 return -1;
6037 }
6038
6039 if (!err)
6040 dev->features = features;
6041
6042 return 1;
6043}
6044
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006045/**
6046 * netdev_update_features - recalculate device features
6047 * @dev: the device to check
6048 *
6049 * Recalculate dev->features set and send notifications if it
6050 * has changed. Should be called after driver or hardware dependent
6051 * conditions might have changed that influence the features.
6052 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006053void netdev_update_features(struct net_device *dev)
6054{
6055 if (__netdev_update_features(dev))
6056 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00006057}
6058EXPORT_SYMBOL(netdev_update_features);
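/*
 * Illustrative sketch (hypothetical driver code): the intended use of
 * netdev_update_features(). After a driver changes a condition that
 * feeds its ndo_fix_features() callback (the private struct and its
 * tso_broken flag are made up for this example), it calls
 * netdev_update_features() under RTNL and lets the core recompute
 * dev->features and send the notification.
 */
struct example_priv {		/* hypothetical private data */
	bool tso_broken;	/* consulted by the driver's ndo_fix_features() */
};

static void example_disable_tso(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	ASSERT_RTNL();

	priv->tso_broken = true;
	netdev_update_features(dev);
}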
6059
Linus Torvalds1da177e2005-04-16 15:20:36 -07006060/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00006061 * netdev_change_features - recalculate device features
6062 * @dev: the device to check
6063 *
6064 * Recalculate dev->features set and send notifications even
6065 * if they have not changed. Should be called instead of
6066 * netdev_update_features() if also dev->vlan_features might
6067 * have changed to allow the changes to be propagated to stacked
6068 * VLAN devices.
6069 */
6070void netdev_change_features(struct net_device *dev)
6071{
6072 __netdev_update_features(dev);
6073 netdev_features_change(dev);
6074}
6075EXPORT_SYMBOL(netdev_change_features);
6076
6077/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006078 * netif_stacked_transfer_operstate - transfer operstate
6079 * @rootdev: the root or lower level device to transfer state from
6080 * @dev: the device to transfer operstate to
6081 *
6082 * Transfer operational state from root to device. This is normally
6083 * called when a stacking relationship exists between the root
6084 * device and the device (a leaf device).
6085 */
6086void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6087 struct net_device *dev)
6088{
6089 if (rootdev->operstate == IF_OPER_DORMANT)
6090 netif_dormant_on(dev);
6091 else
6092 netif_dormant_off(dev);
6093
6094 if (netif_carrier_ok(rootdev)) {
6095 if (!netif_carrier_ok(dev))
6096 netif_carrier_on(dev);
6097 } else {
6098 if (netif_carrier_ok(dev))
6099 netif_carrier_off(dev);
6100 }
6101}
6102EXPORT_SYMBOL(netif_stacked_transfer_operstate);
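/*
 * Illustrative sketch (hypothetical): a stacking driver reacting to a
 * NETDEV_CHANGE event on a lower device by mirroring carrier and
 * dormancy onto its master, which is exactly what
 * netif_stacked_transfer_operstate() is for. Netdevice notifiers run
 * under RTNL, so the upper-device lookup is safe here.
 */
static int example_notifier_cb(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *lower = ptr;	/* this kernel passes the netdev directly */
	struct net_device *upper;

	if (event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	upper = netdev_master_upper_dev_get(lower);
	if (upper)
		netif_stacked_transfer_operstate(lower, upper);
	return NOTIFY_DONE;
}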
6103
Tom Herbertbf264142010-11-26 08:36:09 +00006104#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006105static int netif_alloc_rx_queues(struct net_device *dev)
6106{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006107 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00006108 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006109
Tom Herbertbd25fa72010-10-18 18:00:16 +00006110 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006111
Tom Herbertbd25fa72010-10-18 18:00:16 +00006112 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
6113 if (!rx) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006114 pr_err("netdev: Unable to allocate %u rx queues\n", count);
Tom Herbertbd25fa72010-10-18 18:00:16 +00006115 return -ENOMEM;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006116 }
Tom Herbertbd25fa72010-10-18 18:00:16 +00006117 dev->_rx = rx;
6118
Tom Herbertbd25fa72010-10-18 18:00:16 +00006119 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00006120 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006121 return 0;
6122}
Tom Herbertbf264142010-11-26 08:36:09 +00006123#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006124
Changli Gaoaa942102010-12-04 02:31:41 +00006125static void netdev_init_one_queue(struct net_device *dev,
6126 struct netdev_queue *queue, void *_unused)
6127{
6128 /* Initialize queue lock */
6129 spin_lock_init(&queue->_xmit_lock);
6130 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6131 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00006132 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00006133 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00006134#ifdef CONFIG_BQL
6135 dql_init(&queue->dql, HZ);
6136#endif
Changli Gaoaa942102010-12-04 02:31:41 +00006137}
6138
Tom Herberte6484932010-10-18 18:04:39 +00006139static int netif_alloc_netdev_queues(struct net_device *dev)
6140{
6141 unsigned int count = dev->num_tx_queues;
6142 struct netdev_queue *tx;
6143
6144 BUG_ON(count < 1);
6145
6146 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
6147 if (!tx) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006148 pr_err("netdev: Unable to allocate %u tx queues\n", count);
Tom Herberte6484932010-10-18 18:04:39 +00006149 return -ENOMEM;
6150 }
6151 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00006152
Tom Herberte6484932010-10-18 18:04:39 +00006153 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6154 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00006155
6156 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00006157}
6158
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006159/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006160 * register_netdevice - register a network device
6161 * @dev: device to register
6162 *
6163 * Take a completed network device structure and add it to the kernel
6164 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6165 * chain. 0 is returned on success. A negative errno code is returned
6166 * on a failure to set up the device, or if the name is a duplicate.
6167 *
6168 * Callers must hold the rtnl semaphore. You may want
6169 * register_netdev() instead of this.
6170 *
6171 * BUGS:
6172 * The locking appears insufficient to guarantee two parallel registers
6173 * will not get the same name.
6174 */
6175
6176int register_netdevice(struct net_device *dev)
6177{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006178 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006179 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006180
6181 BUG_ON(dev_boot_phase);
6182 ASSERT_RTNL();
6183
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006184 might_sleep();
6185
Linus Torvalds1da177e2005-04-16 15:20:36 -07006186 /* When net_device's are persistent, this will be fatal. */
6187 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006188 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006189
David S. Millerf1f28aa2008-07-15 00:08:33 -07006190 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07006191 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006192
Linus Torvalds1da177e2005-04-16 15:20:36 -07006193 dev->iflink = -1;
6194
Gao feng828de4f2012-09-13 20:58:27 +00006195 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00006196 if (ret < 0)
6197 goto out;
6198
Linus Torvalds1da177e2005-04-16 15:20:36 -07006199 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006200 if (dev->netdev_ops->ndo_init) {
6201 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006202 if (ret) {
6203 if (ret > 0)
6204 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08006205 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006206 }
6207 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006208
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00006209 ret = -EBUSY;
6210 if (!dev->ifindex)
6211 dev->ifindex = dev_new_index(net);
6212 else if (__dev_get_by_index(net, dev->ifindex))
6213 goto err_uninit;
6214
Linus Torvalds1da177e2005-04-16 15:20:36 -07006215 if (dev->iflink == -1)
6216 dev->iflink = dev->ifindex;
6217
Michał Mirosław5455c692011-02-15 16:59:17 +00006218 /* Transfer changeable features to wanted_features and enable
6219 * software offloads (GSO and GRO).
6220 */
6221 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00006222 dev->features |= NETIF_F_SOFT_FEATURES;
6223 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006224
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006225 /* Turn on no cache copy if HW is doing checksum */
Michał Mirosław34324dc2011-11-15 15:29:55 +00006226 if (!(dev->flags & IFF_LOOPBACK)) {
6227 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6228 if (dev->features & NETIF_F_ALL_CSUM) {
6229 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
6230 dev->features |= NETIF_F_NOCACHE_COPY;
6231 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006232 }
6233
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006234 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00006235 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006236 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00006237
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00006238 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6239 ret = notifier_to_errno(ret);
6240 if (ret)
6241 goto err_uninit;
6242
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006243 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006244 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006245 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006246 dev->reg_state = NETREG_REGISTERED;
6247
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006248 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00006249
Linus Torvalds1da177e2005-04-16 15:20:36 -07006250 /*
6251 * Default initial state at registration is that the
6252 * device is present.
6253 */
6254
6255 set_bit(__LINK_STATE_PRESENT, &dev->state);
6256
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01006257 linkwatch_init_dev(dev);
6258
Linus Torvalds1da177e2005-04-16 15:20:36 -07006259 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006260 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006261 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006262 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006263
Jiri Pirko948b3372013-01-08 01:38:25 +00006264 /* If the device has permanent device address, driver should
6265 * set dev_addr and also addr_assign_type should be set to
6266 * NET_ADDR_PERM (default value).
6267 */
6268 if (dev->addr_assign_type == NET_ADDR_PERM)
6269 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6270
Linus Torvalds1da177e2005-04-16 15:20:36 -07006271 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006272 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07006273 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006274 if (ret) {
6275 rollback_registered(dev);
6276 dev->reg_state = NETREG_UNREGISTERED;
6277 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006278 /*
6279 * Prevent userspace races by waiting until the network
6280 * device is fully setup before sending notifications.
6281 */
Patrick McHardya2835762010-02-26 06:34:51 +00006282 if (!dev->rtnl_link_ops ||
6283 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6284 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006285
6286out:
6287 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006288
6289err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006290 if (dev->netdev_ops->ndo_uninit)
6291 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006292 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006293}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006294EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006295
6296/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006297 * init_dummy_netdev - init a dummy network device for NAPI
6298 * @dev: device to init
6299 *
6300 * This takes a network device structure and initializes the minimum
6301 * number of fields so it can be used to schedule NAPI polls without
6302 * registering a full blown interface. This is to be used by drivers
6303 * that need to tie several hardware interfaces to a single NAPI
6304 * poll scheduler due to HW limitations.
6305 */
6306int init_dummy_netdev(struct net_device *dev)
6307{
6308 /* Clear everything. Note we don't initialize spinlocks
6309 * as they aren't supposed to be taken by any of the
6310 * NAPI code and this dummy netdev is supposed to be
6311 * only ever used for NAPI polls
6312 */
6313 memset(dev, 0, sizeof(struct net_device));
6314
6315 /* make sure we BUG if trying to hit standard
6316 * register/unregister code path
6317 */
6318 dev->reg_state = NETREG_DUMMY;
6319
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006320 /* NAPI wants this */
6321 INIT_LIST_HEAD(&dev->napi_list);
6322
6323 /* a dummy interface is started by default */
6324 set_bit(__LINK_STATE_PRESENT, &dev->state);
6325 set_bit(__LINK_STATE_START, &dev->state);
6326
Eric Dumazet29b44332010-10-11 10:22:12 +00006327 /* Note : We don't allocate pcpu_refcnt for dummy devices,
6328 * because users of this 'device' don't need to change
6329 * its refcount.
6330 */
6331
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006332 return 0;
6333}
6334EXPORT_SYMBOL_GPL(init_dummy_netdev);
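/*
 * Illustrative sketch (hypothetical driver fragment): the pattern
 * init_dummy_netdev() exists for. A driver with several hardware
 * interfaces but no real upper netdev embeds a dummy device purely so
 * netif_napi_add() has something to hang the NAPI context on; the
 * struct and function names below are invented for the example.
 */
struct example_hw {			/* hypothetical driver state */
	struct net_device napi_dev;	/* never registered */
	struct napi_struct napi;
};

static void example_hw_init(struct example_hw *hw,
			    int (*poll)(struct napi_struct *, int))
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, poll, 64);
	napi_enable(&hw->napi);
}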
6335
6336
6337/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006338 * register_netdev - register a network device
6339 * @dev: device to register
6340 *
6341 * Take a completed network device structure and add it to the kernel
6342 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6343 * chain. 0 is returned on success. A negative errno code is returned
6344 * on a failure to set up the device, or if the name is a duplicate.
6345 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07006346 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07006347 * and expands the device name if you passed a format string to
6348 * alloc_netdev.
6349 */
6350int register_netdev(struct net_device *dev)
6351{
6352 int err;
6353
6354 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006355 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006356 rtnl_unlock();
6357 return err;
6358}
6359EXPORT_SYMBOL(register_netdev);
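/*
 * Illustrative sketch (hypothetical probe fragment): the usual
 * allocate/register/unwind sequence around register_netdev(). The
 * "eth%d" template set up by alloc_etherdev() is expanded during
 * registration, and a failed register_netdev() must be followed by
 * free_netdev(), never by unregister_netdev().
 */
static struct net_device *example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct example_priv));
	if (!dev)
		return NULL;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}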
6360
Eric Dumazet29b44332010-10-11 10:22:12 +00006361int netdev_refcnt_read(const struct net_device *dev)
6362{
6363 int i, refcnt = 0;
6364
6365 for_each_possible_cpu(i)
6366 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6367 return refcnt;
6368}
6369EXPORT_SYMBOL(netdev_refcnt_read);
6370
Ben Hutchings2c530402012-07-10 10:55:09 +00006371/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006372 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00006373 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006374 *
6375 * This is called when unregistering network devices.
6376 *
6377 * Any protocol or device that holds a reference should register
6378 * for netdevice notification, and clean up and put back the
6379 * reference if they receive an UNREGISTER event.
6380 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006381 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006382 */
6383static void netdev_wait_allrefs(struct net_device *dev)
6384{
6385 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00006386 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006387
Eric Dumazete014deb2009-11-17 05:59:21 +00006388 linkwatch_forget_dev(dev);
6389
Linus Torvalds1da177e2005-04-16 15:20:36 -07006390 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00006391 refcnt = netdev_refcnt_read(dev);
6392
6393 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006394 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006395 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006396
6397 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006398 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006399
Eric Dumazet748e2d92012-08-22 21:50:59 +00006400 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006401 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00006402 rtnl_lock();
6403
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006404 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006405 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6406 &dev->state)) {
6407 /* We must not have linkwatch events
6408 * pending on unregister. If this
6409 * happens, we simply run the queue
6410 * unscheduled, resulting in a noop
6411 * for this device.
6412 */
6413 linkwatch_run_queue();
6414 }
6415
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006416 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006417
6418 rebroadcast_time = jiffies;
6419 }
6420
6421 msleep(250);
6422
Eric Dumazet29b44332010-10-11 10:22:12 +00006423 refcnt = netdev_refcnt_read(dev);
6424
Linus Torvalds1da177e2005-04-16 15:20:36 -07006425 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006426 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6427 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006428 warning_time = jiffies;
6429 }
6430 }
6431}
6432
6433/* The sequence is:
6434 *
6435 * rtnl_lock();
6436 * ...
6437 * register_netdevice(x1);
6438 * register_netdevice(x2);
6439 * ...
6440 * unregister_netdevice(y1);
6441 * unregister_netdevice(y2);
6442 * ...
6443 * rtnl_unlock();
6444 * free_netdev(y1);
6445 * free_netdev(y2);
6446 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006447 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006448 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006449 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006450 * without deadlocking with linkwatch via keventd.
6451 * 2) Since we run with the RTNL semaphore not held, we can sleep
6452 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006453 *
6454 * We must not return until all unregister events added during
6455 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006456 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		rtnl_lock();
		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
		__rtnl_unlock();

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

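/* Example (illustrative, not part of this file): a driver that keeps
 * its counters in a legacy struct net_device_stats inside its private
 * area can convert them to the 64-bit layout in one call.  "my_priv"
 * and its "hw_stats" member are hypothetical names.
 *
 *	struct my_priv *priv = netdev_priv(dev);
 *
 *	netdev_stats_to_stats64(storage, &priv->hw_stats);
 */
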
/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);

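/* Example (illustrative): taking a 64-bit snapshot of a device's
 * counters.  @storage may live on the caller's stack; dev_get_stats()
 * fills every field before returning.
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: %llu packets received\n", dev->name,
 *		(unsigned long long)stats.rx_packets);
 */
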
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_RPS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		pr_err("alloc_netdev: Unable to allocate device\n");
		return NULL;
	}

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_p;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->upper_dev_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_RPS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;
	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);

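/* Example (illustrative): allocating a multiqueue device with a
 * private area.  "my_priv" and "my_setup" are hypothetical names;
 * Ethernet drivers normally reach this path through the
 * alloc_etherdev_mqs() wrapper.  On any failure before
 * register_netdev() succeeds, the device is released with
 * free_netdev().
 *
 *	static void my_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);
 *	}
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "my%d",
 *			       my_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;
 */
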
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

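/* Example (illustrative): the classic unpublish-then-free pattern.
 * "old" and dev->some_ptr are hypothetical; the point is that no
 * receive path can still see the old object once synchronize_net()
 * returns.
 *
 *	old = rtnl_dereference(dev->some_ptr);
 *	RCU_INIT_POINTER(dev->some_ptr, NULL);
 *	synchronize_net();
 *	kfree(old);
 */
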
/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */

void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

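/* Example (illustrative): batching several unregisters under a single
 * RTNL hold, so the expensive notifier and RCU synchronization in
 * rollback_registered_many() is paid once for the whole batch.
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */
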
/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice/unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	   this device. They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

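/* Example (illustrative): moving a device into a namespace obtained
 * elsewhere (e.g. from get_net_ns_by_fd() or get_net_ns_by_pid()),
 * falling back to a "dev%d" style name if the current name collides.
 * The caller holds RTNL and a reference on @net.
 *
 *	err = dev_change_net_namespace(dev, net, "dev%d");
 *	if (err)
 *		goto errout;
 */
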
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}

/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all. Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);

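/* Example (illustrative): an aggregating driver (bonding/team style)
 * folding each slave's feature set into the master's, one slave at a
 * time.  "for_each_slave" and "MASTER_FEATURE_MASK" are hypothetical;
 * the master starts from the mask of features it is willing to offer
 * and lets netdev_increment_features() narrow it per slave.
 *
 *	netdev_features_t features = MASTER_FEATURE_MASK;
 *
 *	for_each_slave(master, slave)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     MASTER_FEATURE_MASK);
 */
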
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent) {
		r = dev_printk_emit(level[1] - '0',
				    dev->dev.parent,
				    "%s %s %s: %pV",
				    dev_driver_string(dev->dev.parent),
				    dev_name(dev->dev.parent),
				    netdev_name(dev), vaf);
	} else if (dev) {
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	} else {
		r = printk("%s(NULL net_device): %pV", level, vaf);
	}

	return r;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	int r;							\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	r = __netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
								\
	return r;						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

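/* Example (illustrative): the helpers generated above are used like
 * printk(), but automatically prefix the driver name, the parent
 * device name and the interface name.
 *
 *	netdev_err(dev, "TX timeout on queue %d\n", txq);
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 */
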
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	list_del(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device the first device on the list of
	 * network devices: the loopback device is the first device
	 * that appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);
